Merge branch 'mlx5-minimum-inline-header-mode'
Saeed Mahameed says:

====================
Mellanox 100G mlx5 minimum inline header mode

This small series from Hadar adds support for querying the minimum inline header mode in the mlx5e NIC driver.

Today on TX the driver copies only up to the L2 header into the HW descriptor, which is the default required mode and is sufficient for today's needs. The header in the HW descriptor is used for the HW loopback steering decision; without it, packets go directly to the wire with no questions asked.

For TX loopback steering according to L2/L3/L4 headers, ConnectX-4 requires the corresponding headers to be copied into the send queue (SQ) WQE HW descriptor, so it can decide whether to loop the packet back or forward it to the wire. For the legacy E-Switch mode, copying only the L2 header is required. For advanced steering (E-Switch offloads), more header layers may need to be copied; the required mode is advertised by FW to each VF and PF according to the corresponding E-Switch configuration.

Changes V2:
- Allocate query_nic_vport_context_out on the stack
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 779d1436fa
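To make the mode semantics concrete before the diff, here is a minimal standalone sketch (userspace C, not kernel code) of how each minimum inline mode translates into the number of header bytes the driver must copy into the SQ WQE. It assumes a fixed, untagged IPv4/TCP frame with option-less 20-byte L3/L4 headers; the real driver derives these offsets from the skb (see mlx5e_calc_min_inline() in the diff below), and the enum mirrors the mlx5_inline_modes values added by the series.

/* Illustrative sketch only: maps a minimum inline mode to the number
 * of header bytes that must be copied into the WQE, assuming a plain
 * untagged IPv4/TCP frame. Not the driver's actual implementation.
 */
#include <stdio.h>

#define ETH_HLEN  14 /* Ethernet header */
#define VLAN_HLEN  4 /* 802.1Q tag */
#define IPV4_HLEN 20 /* IPv4 header, no options */
#define TCP_HLEN  20 /* TCP header, no options */

enum mlx5_inline_modes {
        MLX5_INLINE_MODE_NONE,    /* no headers required in the WQE */
        MLX5_INLINE_MODE_L2,      /* copy through the L2 header     */
        MLX5_INLINE_MODE_IP,      /* copy through the L3 header     */
        MLX5_INLINE_MODE_TCP_UDP, /* copy through the L4 header     */
};

static int min_inline_bytes(enum mlx5_inline_modes mode)
{
        switch (mode) {
        case MLX5_INLINE_MODE_TCP_UDP:
                return ETH_HLEN + IPV4_HLEN + TCP_HLEN;
        case MLX5_INLINE_MODE_IP:
                return ETH_HLEN + IPV4_HLEN;
        case MLX5_INLINE_MODE_L2:
                /* Room is reserved for a VLAN tag even on untagged
                 * frames, matching MLX5E_MIN_INLINE in the diff.
                 */
                return ETH_HLEN + VLAN_HLEN;
        case MLX5_INLINE_MODE_NONE:
        default:
                return 0;
        }
}

int main(void)
{
        printf("L2:      %d bytes\n", min_inline_bytes(MLX5_INLINE_MODE_L2));
        printf("IP:      %d bytes\n", min_inline_bytes(MLX5_INLINE_MODE_IP));
        printf("TCP/UDP: %d bytes\n", min_inline_bytes(MLX5_INLINE_MODE_TCP_UDP));
        return 0;
}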
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -129,6 +129,12 @@ static inline int mlx5_max_log_rq_size(int wq_type)
 	}
 }
 
+enum {
+	MLX5E_INLINE_MODE_L2,
+	MLX5E_INLINE_MODE_VPORT_CONTEXT,
+	MLX5_INLINE_MODE_NOT_REQUIRED,
+};
+
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_eth_seg  eth;
@@ -188,6 +194,7 @@ struct mlx5e_params {
 	bool lro_en;
 	u32 lro_wqe_sz;
 	u16 tx_max_inline;
+	u8  tx_min_inline_mode;
 	u8  rss_hfunc;
 	u8  toeplitz_hash_key[40];
 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
@@ -398,6 +405,7 @@ struct mlx5e_sq {
 	u32                        sqn;
 	u16                        bf_buf_size;
 	u16                        max_inline;
+	u8                         min_inline_mode;
 	u16                        edge;
 	struct device             *pdev;
 	struct mlx5e_tstamp       *tstamp;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -56,6 +56,7 @@ struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
 	u16                        max_inline;
+	u8                         min_inline_mode;
 	bool                       icosq;
 };
 
@@ -649,6 +650,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	}
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;
+	sq->min_inline_mode =
+		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+		param->min_inline_mode : 0;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -731,6 +735,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 
 	MLX5_SET(sqc,  sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
 	MLX5_SET(sqc,  sqc, cqn,               sq->cq.mcq.cqn);
+	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
 	MLX5_SET(sqc,  sqc, state,             MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc,  sqc, tis_lst_sz,        param->icosq ? 0 : 1);
 	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
@@ -1343,6 +1348,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
 	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
 
 	param->max_inline = priv->params.tx_max_inline;
+	param->min_inline_mode = priv->params.tx_min_inline_mode;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2978,6 +2984,23 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
 }
 
+static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
+				   u8 *min_inline_mode)
+{
+	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+	case MLX5E_INLINE_MODE_L2:
+		*min_inline_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
+		mlx5_query_nic_vport_min_inline(mdev,
+						min_inline_mode);
+		break;
+	case MLX5_INLINE_MODE_NOT_REQUIRED:
+		*min_inline_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	}
+}
+
 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 					struct net_device *netdev,
 					const struct mlx5e_profile *profile,
@@ -3043,6 +3066,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->params.tx_cq_moderation.pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
 	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
 	priv->params.num_tc = 1;
 	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,6 +128,50 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return priv->channeltc_to_txq_map[channel_ix][up];
 }
 
+static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+
+	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+}
+
+static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+{
+	struct flow_keys keys;
+
+	if (skb_transport_header_was_set(skb))
+		return skb_transport_offset(skb);
+	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+		return keys.control.thoff;
+	else
+		return mlx5e_skb_l2_header_offset(skb);
+}
+
+static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+						 struct sk_buff *skb)
+{
+	int hlen;
+
+	switch (mode) {
+	case MLX5_INLINE_MODE_TCP_UDP:
+		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
+			hlen += VLAN_HLEN;
+		return hlen;
+	case MLX5_INLINE_MODE_IP:
+		/* When transport header is set to zero, it means no transport
+		 * header. When transport header is set to 0xff's, it means
+		 * transport header wasn't set.
+		 */
+		if (skb_transport_offset(skb))
+			return mlx5e_skb_l3_header_offset(skb);
+		/* fall through */
+	case MLX5_INLINE_MODE_L2:
+	default:
+		return mlx5e_skb_l2_header_offset(skb);
+	}
+}
+
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 					    struct sk_buff *skb, bool bf)
 {
@@ -135,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	 * headers and occur before the data gather.
 	 * Therefore these headers must be copied into the WQE
 	 */
-#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-
 	if (bf) {
 		u16 ihs = skb_headlen(skb);
 
@@ -146,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 		if (ihs <= sq->max_inline)
 			return skb_headlen(skb);
 	}
-
-	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
 	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
 }
 
+void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u8 *min_inline_mode)
+{
+	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+
+	mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
+
+	*min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
+				    nic_vport_context.min_wqe_inline_mode);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr)
 {
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -129,6 +129,13 @@ __mlx5_mask(typ, fld))
 	tmp;							\
 	})
 
+enum mlx5_inline_modes {
+	MLX5_INLINE_MODE_NONE,
+	MLX5_INLINE_MODE_L2,
+	MLX5_INLINE_MODE_IP,
+	MLX5_INLINE_MODE_TCP_UDP,
+};
+
 enum {
 	MLX5_MAX_COMMANDS		= 32,
 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -536,7 +536,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         self_lb_en_modifiable[0x1];
 	u8         reserved_at_9[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_at_10[0x4];
+	u8         reserved_at_10[0x2];
+	u8         wqe_inline_mode[0x2];
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
 	u8         scatter_fcs[0x1];
@@ -2270,7 +2271,8 @@ struct mlx5_ifc_sqc_bits {
 	u8         cd_master[0x1];
 	u8         fre[0x1];
 	u8         flush_in_error_en[0x1];
-	u8         reserved_at_4[0x4];
+	u8         reserved_at_4[0x1];
+	u8         min_wqe_inline_mode[0x3];
 	u8         state[0x4];
 	u8         reg_umr[0x1];
 	u8         reserved_at_d[0x13];
@@ -2367,7 +2369,9 @@ struct mlx5_ifc_rmpc_bits {
 };
 
 struct mlx5_ifc_nic_vport_context_bits {
-	u8         reserved_at_0[0x1f];
+	u8         reserved_at_0[0x5];
+	u8         min_wqe_inline_mode[0x3];
+	u8         reserved_at_8[0x17];
 	u8         roce_en[0x1];
 
 	u8         arm_change_event[0x1];
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,6 +43,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				  u16 vport, u8 state);
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
+void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u8 *min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
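A note on the mlx5_ifc bit-layout convention used in the hunks above: each u8 name[0xN] declares an N-bit field, laid out MSB-first within big-endian 32-bit words. After this change the NIC vport context therefore starts with 5 reserved bits followed by the 3-bit min_wqe_inline_mode, i.e. bits 26..24 of dword 0. The standalone sketch below illustrates that extraction; it is an assumption-laden illustration of the bit layout only, not the kernel's actual MLX5_GET macro implementation.

/* Illustrative sketch: extract the 3-bit min_wqe_inline_mode field
 * from the first big-endian dword of a NIC vport context, per the
 * layout declared in mlx5_ifc_nic_vport_context_bits above.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* ntohl/htonl */

static uint8_t get_min_wqe_inline_mode(const uint32_t *ctx_be)
{
        uint32_t dw0 = ntohl(ctx_be[0]); /* device data is big-endian */

        /* 5 reserved bits, then 3 bits of min_wqe_inline_mode:
         * shift right by 32 - 5 - 3 = 24 and mask off 3 bits.
         */
        return (dw0 >> 24) & 0x7;
}

int main(void)
{
        /* Fabricated context word encoding mode 2 (MLX5_INLINE_MODE_IP) */
        uint32_t ctx[1] = { htonl(2u << 24) };

        printf("min_wqe_inline_mode = %u\n", get_min_wqe_inline_mode(ctx));
        return 0;
}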