net/mlx5e: Tx, Soften inline mode VLAN dependencies
If capable, use zero inline mode in TX WQE for non-VLAN packets. For VLAN ones, keep the enforcement of at least L2 inline mode, unless the WQE VLAN insertion offload cap is on.

Performance: Tested single core packet rate of 64 Bytes.
NIC: ConnectX-5
CPU: Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz

pktgen:
Before: 12.46 Mpps
After:  14.65 Mpps (+17.5%)

XDP_TX:
The MPWQE flow is not affected, as it already has this optimization. So we test with priv-flag xdp_tx_mpwqe: off.
Before:  9.90 Mpps
After:  10.20 Mpps (+3%)

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Tested-by: Noam Stolero <noams@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
parent
7cf6f811b7
commit
b431302e92
@ -359,6 +359,7 @@ enum {
|
||||
MLX5E_SQ_STATE_IPSEC,
|
||||
MLX5E_SQ_STATE_AM,
|
||||
MLX5E_SQ_STATE_TLS,
|
||||
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
|
||||
};
|
||||
|
||||
struct mlx5e_sq_wqe_info {
|
||||
@ -1132,7 +1133,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params);
|
||||
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
|
||||
u16 num_channels);
|
||||
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
|
||||
void mlx5e_rx_dim_work(struct work_struct *work);
|
||||
void mlx5e_tx_dim_work(struct work_struct *work);
|
||||
|
||||
|
@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
|
||||
mlx5_write64((__be32 *)ctrl, uar_map);
|
||||
}
|
||||
|
||||
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
|
||||
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
|
||||
{
|
||||
return !!wqe->ctrl.tisn;
|
||||
return cseg && !!cseg->tisn;
|
||||
}
|
||||
|
||||
static inline u8
|
||||
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
u8 mode;
|
||||
|
||||
if (mlx5e_transport_inline_tx_wqe(cseg))
|
||||
return MLX5_INLINE_MODE_TCP_UDP;
|
||||
|
||||
mode = sq->min_inline_mode;
|
||||
|
||||
if (skb_vlan_tag_present(skb) &&
|
||||
test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
|
||||
mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
|
||||
|
@ -180,15 +180,3 @@ out:
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
u8 min_inline_mode;
|
||||
|
||||
mlx5_query_min_inline(mdev, &min_inline_mode);
|
||||
if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
|
||||
!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
|
||||
min_inline_mode = MLX5_INLINE_MODE_L2;
|
||||
|
||||
return min_inline_mode;
|
||||
}
|
||||
|
@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
|
||||
static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
|
||||
struct mlx5e_params *params)
|
||||
{
|
||||
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
|
||||
mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode);
|
||||
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
|
||||
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
|
||||
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
|
||||
|
@ -1131,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
|
||||
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
|
||||
sq->stop_room = MLX5E_SQ_STOP_ROOM;
|
||||
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
|
||||
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
|
||||
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
|
||||
if (MLX5_IPSEC_DEV(c->priv->mdev))
|
||||
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
|
||||
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
|
||||
@ -4777,7 +4779,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
|
||||
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
|
||||
|
||||
/* TX inline */
|
||||
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
|
||||
mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
|
||||
|
||||
/* RSS */
|
||||
mlx5e_build_rss_params(rss_params, params->num_channels);
|
||||
|
@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
|
||||
stats->packets += skb_shinfo(skb)->gso_segs;
|
||||
} else {
|
||||
u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
|
||||
MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
|
||||
u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
|
||||
|
||||
opcode = MLX5_OPCODE_SEND;
|
||||
mss = 0;
|
||||
@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
|
||||
stats->packets += skb_shinfo(skb)->gso_segs;
|
||||
} else {
|
||||
u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
|
||||
|
||||
opcode = MLX5_OPCODE_SEND;
|
||||
mss = 0;
|
||||
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
|
||||
ihs = mlx5e_calc_min_inline(mode, skb);
|
||||
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
|
||||
stats->packets++;
|
||||
}
|
||||
|
@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
|
||||
u8 *min_inline_mode)
|
||||
{
|
||||
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
|
||||
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
|
||||
if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
|
||||
break;
|
||||
/* fall through */
|
||||
case MLX5_CAP_INLINE_MODE_L2:
|
||||
*min_inline_mode = MLX5_INLINE_MODE_L2;
|
||||
break;
|
||||
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
|
||||
mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
|
||||
break;
|
||||
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
|
||||
*min_inline_mode = MLX5_INLINE_MODE_NONE;
|
||||
break;
|
||||
|
Loading…
Reference in New Issue
Block a user