mlx5-fixes-2020-11-17
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl+0KZ4ACgkQSD+KveBX
+j5DDAgAip1LGfOUq7RISh1OWQ9zLl2KT/mmbdSioObGgjKunU4lMGZAPrLB0bpe
RH1RodslY+1bepcz7VQ/QxbL0EU605SZn08xLZIQAYYXT0Sar+hhg7h7vhD0iSA1
dwDF/XLQYHf8snc3x/tu6/zp9BzfzRfuiieYAbC1ri97Iz9uoYO+QcRI7OyqgXWf
LmK9/s6WzgaC0DKg8XXEQGp7F1MbZfJph6tThZnF62NY9Rrkut6AEah85p1QaqF7
9WLjqOtJwo893fzbN+WCXJTYo2UX9oU18nUSvSoYid2LEofiwY3PcT8R0gKSPZto
cVcjm1oGsotbonBnPlR2eMZq8LdPDQ==
=MKi5
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-11-17

This series introduces some fixes to the mlx5 driver.

* tag 'mlx5-fixes-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: fix error return code in mlx5e_tc_nic_init()
  net/mlx5: E-Switch, Fail mlx5_esw_modify_vport_rate if qos disabled
  net/mlx5: Disable QoS when min_rates on all VFs are zero
  net/mlx5: Clear bw_share upon VF disable
  net/mlx5: Add handling of port type in rule deletion
  net/mlx5e: Fix check if netdev is bond slave
  net/mlx5e: Fix IPsec packet drop by mlx5e_tc_update_skb
  net/mlx5e: Set IPsec WAs only in IP's non checksum partial case.
  net/mlx5e: Fix refcount leak on kTLS RX resync
====================

Link: https://lore.kernel.org/r/20201117195702.386113-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit f93e8497a9
@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
        struct mlx5e_priv *priv;

        /* A given netdev is not a representor or not a slave of LAG configuration */
-       if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+       if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
                return false;

        priv = netdev_priv(netdev);
@@ -64,13 +64,13 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
        if (!spec)
                return -ENOMEM;

-       /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+       /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
        MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
        MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
        MLX5_SET(copy_action_in, action, src_offset, 0);
        MLX5_SET(copy_action_in, action, length, 7);
        MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-       MLX5_SET(copy_action_in, action, dst_offset, 0);
+       MLX5_SET(copy_action_in, action, dst_offset, 24);

        modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);
@@ -488,13 +488,13 @@ static int rx_add_rule(struct mlx5e_priv *priv,

        setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

-       /* Set 1 bit ipsec marker */
-       /* Set 24 bit ipsec_obj_id */
+       /* Set bit[31] ipsec marker */
+       /* Set bit[23-0] ipsec_obj_id */
        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-       MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
-       MLX5_SET(set_action_in, action, offset, 7);
-       MLX5_SET(set_action_in, action, length, 25);
+       MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
+       MLX5_SET(set_action_in, action, offset, 0);
+       MLX5_SET(set_action_in, action, length, 32);

        modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);
@@ -453,7 +453,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct mlx5_cqe64 *cqe)
 {
        u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
-       u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
        struct mlx5e_priv *priv;
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
@@ -481,7 +480,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;

-       switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+       switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (WARN_ON_ONCE(priv->ipsec->no_trailer))
@@ -39,9 +39,10 @@
 #include "en.h"
 #include "en/txrx.h"

-#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
-#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
-#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))

 struct mlx5e_accel_tx_ipsec_state {
        struct xfrm_offload *xo;
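The three fields now share the 32-bit reg_b/ft_metadata word without spilling into the low 24 bits. Below is a minimal user-space sketch of the pack/unpack round trip; GENMASK() is reimplemented here (in the kernel it comes from <linux/bits.h>) and the sample values are purely illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define IPSEC_METADATA_MARKER(md)  (((md) >> 31) & 0x1)
#define IPSEC_METADATA_SYNDROM(md) (((md) >> 24) & GENMASK(6, 0))
#define IPSEC_METADATA_HANDLE(md)  ((md) & GENMASK(23, 0))

int main(void)
{
        uint32_t obj_id = 0xABCDE;   /* 24-bit IPsec object id */
        uint32_t syndrome = 0x05;    /* 7-bit syndrome */
        /* Pack the fields the way rx_add_rule()/rx_err_add_rule() program
         * the hardware to: marker via BIT(31), syndrome in bits 24-30. */
        uint32_t md = (1u << 31) | (syndrome << 24) | obj_id;

        assert(IPSEC_METADATA_MARKER(md) == 1);
        assert(IPSEC_METADATA_SYNDROM(md) == syndrome);
        assert(IPSEC_METADATA_HANDLE(md) == obj_id);
        printf("metadata=0x%08x marker=%u syndrome=%u handle=0x%x\n",
               md, IPSEC_METADATA_MARKER(md),
               IPSEC_METADATA_SYNDROM(md), IPSEC_METADATA_HANDLE(md));
        return 0;
}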
@@ -78,7 +79,7 @@ static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_st

 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
 {
-       return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+       return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
 }

 static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)

        depth += sizeof(struct tcphdr);

-       if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+       if (unlikely(!sk))
                return;

+       if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+               goto unref;
+
        if (unlikely(!resync_queue_get_psv(sk)))
-               return;
-
-       skb->sk = sk;
-       skb->destructor = sock_edemux;
+               goto unref;

        seq = th->seq;
        datalen = skb->len - depth;
        tls_offload_rx_resync_async_request_start(sk, seq, datalen);
        rq->stats->tls_resync_req_start++;
+
+unref:
+       sock_gen_put(sk);
 }

 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
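The leak fixed here: the socket lookup returns the socket with an extra reference, but the old code released it only via skb->destructor on the success path, so every early return leaked it. A toy user-space sketch of the corrected single-release pattern (the object and helper names are hypothetical, and the refcount is simplified to a plain counter):

#include <stdio.h>

struct obj { int refcnt; };

static struct obj global = { .refcnt = 1 };

static struct obj *lookup(void) /* returns with an extra reference */
{
        global.refcnt++;
        return &global;
}

static void put(struct obj *o) { o->refcnt--; }

static void handle(int usable)
{
        struct obj *o = lookup();

        if (!o)
                return;     /* nothing was referenced, nothing to drop */
        if (!usable)
                goto unref; /* early exit still drops the reference */

        /* ... start async work that takes its own reference ... */

unref:
        put(o);             /* single release point for all paths */
}

int main(void)
{
        handle(0);
        handle(1);
        printf("refcnt=%d (balanced back to 1)\n", global.refcnt);
        return 0;
}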
@@ -5229,8 +5229,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)

        tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
                                 MLX5_FLOW_NAMESPACE_KERNEL);
-       if (IS_ERR(tc->ct))
+       if (IS_ERR(tc->ct)) {
+               err = PTR_ERR(tc->ct);
                goto err_ct;
+       }

        tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
        err = register_netdevice_notifier_dev_net(priv->netdev,
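The bug: when mlx5_tc_ct_init() failed, only the goto ran, so err could still be 0 and mlx5e_tc_nic_init() reported success on failure. A user-space sketch of the IS_ERR()/PTR_ERR() convention involved; the macros are reimplemented from their usual semantics (the kernel versions live in <linux/err.h>) and the handle value is fake:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *ct_init(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake handle */
}

static int nic_init(int fail)
{
        int err = 0;
        void *ct = ct_init(fail);

        if (IS_ERR(ct)) {
                err = PTR_ERR(ct); /* the assignment the fix adds */
                goto err_ct;
        }
        return 0;

err_ct:
        return err;
}

int main(void)
{
        printf("ok path: %d, failing path: %d\n", nic_init(0), nic_init(1));
        return 0;
}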
@@ -283,6 +283,9 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)

        reg_b = be32_to_cpu(cqe->ft_metadata);

+       if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+               return false;
+
        chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
        if (chain)
                return true;
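This guard lets the TC restore path ignore CQEs whose reg_b carries IPsec metadata (marker in bit 31) rather than a chain tag, which is what previously made mlx5e_tc_update_skb() drop IPsec packets. A sketch of the check with assumed field widths (16-bit chain tag, 8-bit zone restore; the widths are an assumption based on the mask names, not read from this diff):

#include <stdint.h>
#include <stdio.h>

#define CHAIN_TAG_BITS    16
#define ZONE_RESTORE_BITS 8
#define CHAIN_TAG_MASK    ((1u << CHAIN_TAG_BITS) - 1)

static int regb_has_chain(uint32_t reg_b)
{
        /* Any bits above chain-tag + zone-restore mean reg_b holds
         * something else entirely, so no chain restore is attempted. */
        if (reg_b >> (CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
                return 0;
        return (reg_b & CHAIN_TAG_MASK) != 0;
}

int main(void)
{
        uint32_t tc_md = 0x00050003;              /* zone 5, chain 3 */
        uint32_t ipsec_md = (1u << 31) | 0xABCDE; /* marker + obj id */

        printf("tc metadata:    chain? %d\n", regb_has_chain(tc_md));
        printf("ipsec metadata: chain? %d\n", regb_has_chain(ipsec_md));
        return 0;
}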
@@ -144,7 +144,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }

-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
+ * need to set L3 checksum flag for IPsec
+ */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                            struct mlx5_wqe_eth_seg *eseg)
@@ -154,7 +156,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
                sq->stats->csum_partial_inner++;
        } else {
-               eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                sq->stats->csum_partial++;
        }
 }
@@ -162,11 +163,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 static inline void
 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
 {
-       if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
-               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-               return;
-       }
-
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
@@ -177,6 +173,9 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
+       } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+               ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+
        } else
                sq->stats->csum_none++;
 }
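After the reorder, a packet the stack marked CHECKSUM_PARTIAL always takes the regular offload branch, and the IPsec workaround applies only to non-CHECKSUM_PARTIAL packets (e.g. ICMP) that carry IPsec metadata. A control-flow sketch of the new priority (the enum and return strings are purely illustrative):

#include <stdbool.h>
#include <stdio.h>

enum csum { CSUM_NONE, CSUM_PARTIAL };

static const char *pick(enum csum ip_summed, bool ipsec_meta)
{
        if (ip_summed == CSUM_PARTIAL)
                return "L3/L4 csum flags";      /* normal offload path */
        else if (ipsec_meta)
                return "IPsec workaround csum"; /* e.g. ICMP in IPsec */
        else
                return "no csum";
}

int main(void)
{
        printf("partial+ipsec: %s\n", pick(CSUM_PARTIAL, true));
        printf("none+ipsec:    %s\n", pick(CSUM_NONE, true));
        printf("none:          %s\n", pick(CSUM_NONE, false));
        return 0;
}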
@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
        struct mlx5_vport *vport;

        vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+       if (!vport->qos.enabled)
+               return -EOPNOTSUPP;
+
        MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

        return mlx5_modify_scheduling_element_cmd(esw->dev,
@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
        int i;

        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+               memset(&vport->qos, 0, sizeof(vport->qos));
                memset(&vport->info, 0, sizeof(vport->info));
                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
        }
@@ -2221,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
                        max_guarantee = evport->info.min_rate;
        }

-       return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+       if (max_guarantee)
+               return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+       return 0;
 }

-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
 {
        u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+       u32 divider = calculate_vports_min_rate_divider(esw);
        struct mlx5_vport *evport;
        u32 vport_max_rate;
        u32 vport_min_rate;
@@ -2239,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
                        continue;
                vport_min_rate = evport->info.min_rate;
                vport_max_rate = evport->info.max_rate;
-               bw_share = MLX5_MIN_BW_SHARE;
+               bw_share = 0;

-               if (vport_min_rate)
+               if (divider)
                        bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
                                                         divider,
                                                         fw_max_bw_share);
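With min_rate at zero on every VF, calculate_vports_min_rate_divider() now returns 0 and each bw_share is written as 0, disabling QoS, where the old code always forced at least MLX5_MIN_BW_SHARE. A user-space sketch of the arithmetic; rate_to_bw_share() is assumed to mirror MLX5_RATE_TO_BW_SHARE's divide-then-clamp behavior, and the firmware maximum is a made-up value:

#include <stdio.h>

#define MLX5_MIN_BW_SHARE 1

/* Assumed semantics: rate scaled down by the divider, then clamped
 * between MLX5_MIN_BW_SHARE and the firmware maximum. */
static unsigned int rate_to_bw_share(unsigned int rate, unsigned int divider,
                                     unsigned int fw_max)
{
        unsigned int share = rate / divider;

        if (share < MLX5_MIN_BW_SHARE)
                share = MLX5_MIN_BW_SHARE;
        if (share > fw_max)
                share = fw_max;
        return share;
}

static void normalize(const unsigned int *min_rates, int n, unsigned int fw_max)
{
        unsigned int max_guarantee = 0, divider = 0;
        int i;

        for (i = 0; i < n; i++)
                if (min_rates[i] > max_guarantee)
                        max_guarantee = min_rates[i];

        /* New behavior: divider stays 0 when no VF has a guarantee. */
        if (max_guarantee) {
                divider = max_guarantee / fw_max;
                if (divider < 1)
                        divider = 1;
        }

        for (i = 0; i < n; i++) {
                unsigned int bw_share = 0; /* old code started at MLX5_MIN_BW_SHARE */

                if (divider)
                        bw_share = rate_to_bw_share(min_rates[i], divider, fw_max);
                printf("  vf%d min_rate=%u -> bw_share=%u\n",
                       i, min_rates[i], bw_share);
        }
}

int main(void)
{
        unsigned int none[3] = { 0, 0, 0 };
        unsigned int some[3] = { 1000, 0, 0 };

        printf("all zero (QoS disabled):\n");
        normalize(none, 3, 100);
        printf("one guarantee set:\n");
        normalize(some, 3, 100);
        return 0;
}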
@@ -2266,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        u32 fw_max_bw_share;
        u32 previous_min_rate;
-       u32 divider;
        bool min_rate_supported;
        bool max_rate_supported;
        int err = 0;
@@ -2291,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,

        previous_min_rate = evport->info.min_rate;
        evport->info.min_rate = min_rate;
-       divider = calculate_vports_min_rate_divider(esw);
-       err = normalize_vports_min_rate(esw, divider);
+       err = normalize_vports_min_rate(esw);
        if (err) {
                evport->info.min_rate = previous_min_rate;
                goto unlock;
@@ -534,6 +534,13 @@ static void del_sw_hw_rule(struct fs_node *node)
                goto out;
        }

+       if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
+           --fte->dests_size) {
+               fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+               fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+               goto out;
+       }
+
        if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
            --fte->dests_size) {
                fte->modify_mask |=
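The new branch mirrors the FWD_DEST case that follows it: removing a port-type destination while other destinations remain keeps the FTE alive but clears its ALLOW action bit and flags the action mask for a hardware update. A toy sketch of that bit manipulation (the flag values here are hypothetical, not the driver's):

#include <stdio.h>

#define ACTION_ALLOW    (1u << 0) /* hypothetical flag values */
#define ACTION_FWD_DEST (1u << 1)

int main(void)
{
        unsigned int action = ACTION_ALLOW | ACTION_FWD_DEST;
        int dests_size = 2;

        /* One destination of type PORT removed: keep the FTE but strip
         * the ALLOW bit, as the new branch in del_sw_hw_rule() does. */
        if (--dests_size)
                action &= ~ACTION_ALLOW;

        printf("dests=%d action=0x%x (ALLOW cleared, FWD kept)\n",
               dests_size, action);
        return 0;
}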