mlx5-fixes-2021-10-20
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmFwVRsACgkQSD+KveBX
+j7yPQgAzD/7Dgf7YpXbvaLRn39TYMiEm+fvRjJUlsom4pnJnN4twgaGyZHMv35j
IajglyClyJII+kZQ3bB+f3p3ryFJPpLED59hCqCeTSIhI78F569k/5UFo7bpwvqN
ce1llNricBY2iFN6XnqP+pT/DmNbroh8VMqX2uG5wCk6YnIhFNNz2pV8UhhhHPX1
nEDzJ7fSX+Xw1tXoAZWQtgEUEMgnLKzXgRkQ2Rp/zl41LVM1g6ecCmWKeDIF7YtR
Jg967xLDzulamSJwMRZ2Ioa5J5XvDA2jMKHCa1FKgcRyryFWOvAIRcmZmh5oMkja
r66bgDsXXRD2f1LTbKQlEyo3ilG6pA==
=+7Xk
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-10-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2021-10-20
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e0bfcf9c77
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
@@ -10,6 +10,8 @@
 #include "en_tc.h"
 #include "rep/tc.h"
 #include "rep/neigh.h"
+#include "lag.h"
+#include "lag_mp.h"
 
 struct mlx5e_tc_tun_route_attr {
     struct net_device *out_dev;
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
  * Pkt: MAC  IP     ESP  IP    L4
  *
  * Transport Mode:
- * SWP:      OutL3       InL4
- *           InL3
+ * SWP:      OutL3       OutL4
  * Pkt: MAC  IP     ESP  L4
  *
  * Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
         return;
 
     if (!xo->inner_ipproto) {
-        eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-        eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-        if (skb->protocol == htons(ETH_P_IPV6))
-            eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-        if (xo->proto == IPPROTO_UDP)
+        switch (xo->proto) {
+        case IPPROTO_UDP:
+            eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+            fallthrough;
+        case IPPROTO_TCP:
+            /* IP | ESP | TCP */
+            eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+            break;
+        default:
+            break;
+        }
+    } else {
+        /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+        switch (xo->inner_ipproto) {
+        case IPPROTO_UDP:
             eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-        return;
+            fallthrough;
+        case IPPROTO_TCP:
+            eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+            eseg->swp_inner_l4_offset =
+                (skb->csum_start + skb->head - skb->data) / 2;
+            if (skb->protocol == htons(ETH_P_IPV6))
+                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+            break;
+        default:
+            break;
+        }
     }
-
-    /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
-    switch (xo->inner_ipproto) {
-    case IPPROTO_UDP:
-        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-        fallthrough;
-    case IPPROTO_TCP:
-        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-        eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
-        if (skb->protocol == htons(ETH_P_IPV6))
-            eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-        break;
-    default:
-        break;
-    }
-
-    return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
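
The SWP (software parser) fields filled above are offsets in 2-byte words, which is why every skb byte offset is divided by 2. A self-contained illustration of that arithmetic for the transport-mode MAC | IP | ESP | TCP layout from the comment block (plain userspace C with made-up header sizes, not mlx5 code):

    #include <stdio.h>

    /* Illustrative header sizes; real packets vary (IP options, ESP IV, ...). */
    enum { ETH_HLEN = 14, IPV4_HLEN = 20, ESP_HLEN = 8 };

    int main(void)
    {
        /* MAC | IP | ESP | TCP: the device wants L3/L4 offsets in 2-byte
         * words, mirroring eseg->swp_*_offset = byte_offset / 2.
         */
        unsigned int l3_bytes = ETH_HLEN;                        /* 14 */
        unsigned int l4_bytes = ETH_HLEN + IPV4_HLEN + ESP_HLEN; /* 42 */

        printf("swp_outer_l3_offset = %u words\n", l3_bytes / 2); /* 7  */
        printf("swp_outer_l4_offset = %u words\n", l4_bytes / 2); /* 21 */
        return 0;
    }
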
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
     struct mlx5e_flow_table *ft;
     int err;
 
-    priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
-    if (!priv->fs.vlan)
-        return -ENOMEM;
-
     ft = &priv->fs.vlan->ft;
     ft->num_groups = 0;
 
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
     ft_attr.prio = MLX5E_NIC_PRIO;
 
     ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
-    if (IS_ERR(ft->t)) {
-        err = PTR_ERR(ft->t);
-        goto err_free_t;
-    }
+    if (IS_ERR(ft->t))
+        return PTR_ERR(ft->t);
 
     ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
     if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
     kfree(ft->g);
 err_destroy_vlan_table:
     mlx5_destroy_flow_table(ft->t);
-err_free_t:
-    kvfree(priv->fs.vlan);
-    priv->fs.vlan = NULL;
 
     return err;
 }
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
     mlx5e_del_vlan_rules(priv);
     mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
-    kvfree(priv->fs.vlan);
 }
 
 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
     mlx5e_arfs_destroy_tables(priv);
     mlx5e_ethtool_cleanup_steering(priv);
 }
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+    priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+    if (!priv->fs.vlan)
+        return -ENOMEM;
+    return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+    kvfree(priv->fs.vlan);
+    priv->fs.vlan = NULL;
+}
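
Taken with the vlan-table hunks above, these two helpers split allocation from table setup: priv->fs.vlan is now allocated once at profile init and freed once at profile cleanup, while mlx5e_create_vlan_table()/mlx5e_destroy_vlan_table() only build and tear down the flow tables. A standalone sketch of that ownership split (illustrative types, not the driver's):

    #include <stdlib.h>

    struct vlan_state { int tables; };
    struct fs { struct vlan_state *vlan; };

    static int fs_init(struct fs *fs)          /* once, at profile init */
    {
        fs->vlan = calloc(1, sizeof(*fs->vlan));
        return fs->vlan ? 0 : -1;
    }

    static void fs_cleanup(struct fs *fs)      /* once, at profile teardown */
    {
        free(fs->vlan);
        fs->vlan = NULL;
    }

    /* Table create/destroy can now cycle without reallocating. */
    static void create_tables(struct fs *fs)  { fs->vlan->tables = 1; }
    static void destroy_tables(struct fs *fs) { fs->vlan->tables = 0; }

    int main(void)
    {
        struct fs fs;

        if (fs_init(&fs))
            return 1;
        create_tables(&fs);
        destroy_tables(&fs);
        create_tables(&fs);   /* safe: fs.vlan is still allocated */
        fs_cleanup(&fs);
        return 0;
    }
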
@@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
     mlx5e_timestamp_init(priv);
 
+    err = mlx5e_fs_init(priv);
+    if (err) {
+        mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+        return err;
+    }
+
     err = mlx5e_ipsec_init(priv);
     if (err)
         mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
     mlx5e_health_destroy_reporters(priv);
     mlx5e_tls_cleanup(priv);
     mlx5e_ipsec_cleanup(priv);
+    mlx5e_fs_cleanup(priv);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -67,6 +67,8 @@
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
 #include <asm/div64.h>
+#include "lag.h"
+#include "lag_mp.h"
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
     memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                 struct mlx5_wqe_eth_seg *eseg)
 {
+    struct xfrm_offload *xo = xfrm_offload(skb);
+
     eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-    if (skb->encapsulation) {
-        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+    if (xo->inner_ipproto) {
+        eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+    } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
         sq->stats->csum_partial_inner++;
-    } else {
-        sq->stats->csum_partial++;
     }
 }
 
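
The rewritten helper keys the checksum flags off the xfrm state's inner_ipproto rather than skb->encapsulation: outer L3 checksum is always requested, packets carrying an inner protocol inside ESP additionally get inner L3+L4 checksum, and otherwise CHECKSUM_PARTIAL traffic gets outer L4. A minimal standalone model of that decision (flag values are made up for the demo, not the hardware encoding):

    #include <stdbool.h>
    #include <stdio.h>

    enum {
        L3_CSUM       = 1 << 0,
        L4_CSUM       = 1 << 1,
        L3_INNER_CSUM = 1 << 2,
        L4_INNER_CSUM = 1 << 3,
    };

    static unsigned int ipsec_cs_flags(unsigned char inner_ipproto, bool csum_partial)
    {
        unsigned int flags = L3_CSUM;      /* always offload outer L3 csum */

        if (inner_ipproto)                 /* tunnelled payload inside ESP */
            flags |= L4_INNER_CSUM | L3_INNER_CSUM;
        else if (csum_partial)             /* plain transport payload */
            flags |= L4_CSUM;
        return flags;
    }

    int main(void)
    {
        printf("VXLAN-in-ESP: %#x\n", ipsec_cs_flags(17, true));  /* 0xd */
        printf("TCP-in-ESP:   %#x\n", ipsec_cs_flags(0, true));   /* 0x3 */
        printf("ICMP-in-ESP:  %#x\n", ipsec_cs_flags(0, false));  /* 0x1 */
        return 0;
    }
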
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                 struct mlx5e_accel_tx_state *accel,
                 struct mlx5_wqe_eth_seg *eseg)
 {
+    if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+        ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+        return;
+    }
+
     if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
         eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
         if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
         eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
         sq->stats->csum_partial++;
 #endif
-    } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
-        ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
     } else
         sq->stats->csum_none++;
 }
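
These two hunks reorder the dispatch: IPsec packets are recognized at the top of mlx5e_txwqe_build_eseg_csum() and handled with an early return, instead of only inside the CHECKSUM_PARTIAL branch. That way an IPsec packet that is not CHECKSUM_PARTIAL (an ICMP echo, say) still gets its L3 flags set rather than falling through to csum_none. The shape of the fix, as a hedged standalone sketch (invented types, not the driver code):

    #include <stdbool.h>

    struct pkt { bool ipsec_meta; bool csum_partial; int stat; };

    static void build_ipsec_csum(struct pkt *p)   { p->stat = 1; }
    static void build_partial_csum(struct pkt *p) { p->stat = 2; }

    static void build_csum(struct pkt *p)
    {
        if (p->ipsec_meta) {      /* moved in front of the generic test */
            build_ipsec_csum(p);
            return;
        }
        if (p->csum_partial)
            build_partial_csum(p);
        else
            p->stat = 0;          /* csum_none */
    }

    int main(void)
    {
        struct pkt icmp_over_ipsec = { .ipsec_meta = true, .csum_partial = false };

        build_csum(&icmp_over_ipsec);
        return icmp_over_ipsec.stat == 1 ? 0 : 1;  /* no longer csum_none */
    }
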
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
 
 err_min_rate:
     list_del(&group->list);
-    err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-                          SCHEDULING_HIERARCHY_E_SWITCH,
-                          group->tsar_ix);
-    if (err)
+    if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+                        SCHEDULING_HIERARCHY_E_SWITCH,
+                        group->tsar_ix))
         NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
 err_sched_elem:
     kfree(group);
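
The error-label change keeps the original failure code intact: previously the rollback overwrote err with the return value of mlx5_destroy_scheduling_element_cmd(), so the caller could see the cleanup command's status (even success) instead of the error that triggered the unwind. A small standalone model of the pattern:

    #include <stdio.h>

    static int cleanup_resource(void) { return -1; /* pretend cleanup also fails */ }

    static int create_group(void)
    {
        int err = -22;                  /* the original failure, e.g. -EINVAL */

        /* Unwind path: report a cleanup failure, but never let it clobber
         * 'err' -- the bug was 'err = cleanup_resource();'.
         */
        if (cleanup_resource())
            fprintf(stderr, "destroy TSAR for group failed\n");
        return err;                     /* caller still sees -22 */
    }

    int main(void) { return create_group() == -22 ? 0 : 1; }
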
@@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
     if (!mlx5_lag_is_ready(ldev)) {
         do_bond = false;
     } else {
+        /* VF LAG is in multipath mode, ignore bond change requests */
+        if (mlx5_lag_is_multipath(dev0))
+            return;
+
         tracker = ldev->tracker;
 
         do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
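
mlx5_do_bond() now bails out while VF LAG is in multipath mode, and together with the lag_mp.c hunk below the two modes become mutually exclusive: bond change events are ignored in multipath mode, and multipath activation is refused while ordinary LAG is active. A compact model of such a mutual-exclusion guard (illustrative, not the driver's state machine):

    #include <stdbool.h>

    enum lag_mode { LAG_NONE, LAG_BOND, LAG_MULTIPATH };

    /* Each activation path refuses to run while the other mode owns the
     * LAG, mirroring the two guards this series adds.
     */
    static bool can_enter(enum lag_mode current, enum lag_mode wanted)
    {
        return current == LAG_NONE || current == wanted;
    }

    int main(void)
    {
        return can_enter(LAG_MULTIPATH, LAG_BOND) ? 1 : 0; /* refused -> 0 */
    }
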
@@ -9,20 +9,23 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+    return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
     if (!mlx5_lag_is_ready(ldev))
         return false;
 
+    if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+        return false;
+
     return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
                      ldev->pf[MLX5_LAG_P2].dev);
 }
 
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
-    return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
 bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
 {
     struct mlx5_lag *ldev;
@@ -24,12 +24,14 @@ struct lag_mp {
 void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
 int mlx5_lag_mp_init(struct mlx5_lag *ldev);
 void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
 static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
 static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
 static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
 
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_LAG_MP_H__ */
@@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);