Merge tag 'mlx5-updates-2018-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-03-22 (Misc updates)

This series includes misc updates for the mlx5 core and netdev driver.

Highlights:

From Inbar, three patches to add support for PFC stall prevention
statistics and enable/disable through a new ethtool tunable, as requested
in a previous submission.

From Moshe, four patches that add more drop counters:
 - drop counter for netdev steering miss
 - drop counter for when the VF logical link is down
 - drop counter for when the netdev logical link is down

From Or, three patches to support vlan push/pop offload via the tc HW
action on newer HW (ConnectX-5 and onward), using HW steering flow
actions rather than the emulated path used for the older HW brands.

And five more misc small trivial patches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5f75a1863e
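The PFC stall prevention timeout added by this series is exposed to user space as an ethtool tunable. The sketch below is illustrative only and is not part of the series: it shows one way a userspace tool could drive that tunable through the SIOCETHTOOL ioctl, assuming an interface named "eth0" and that ETHTOOL_PFC_PREVENTION_TOUT, PFC_STORM_PREVENTION_AUTO and ETHTOOL_TUNABLE_U16 are available from the uapi <linux/ethtool.h> of a kernel carrying this series.

/*
 * Hypothetical userspace sketch (not part of this series): set and read the
 * PFC stall-prevention timeout via the ethtool tunable interface that the
 * driver hooks in mlx5e_get_tunable()/mlx5e_set_tunable() below.
 * Assumptions: interface "eth0"; ETHTOOL_PFC_PREVENTION_TOUT and
 * PFC_STORM_PREVENTION_AUTO come from a uapi <linux/ethtool.h> with this series.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int tunable_ioctl(int fd, struct ifreq *ifr, __u32 cmd, __u16 *val)
{
        /* struct ethtool_tunable is followed by the tunable payload (a u16 here) */
        size_t sz = sizeof(struct ethtool_tunable) + sizeof(__u16);
        struct ethtool_tunable *tuna = calloc(1, sz);
        int err;

        if (!tuna)
                return -1;
        tuna->cmd     = cmd;                       /* ETHTOOL_GTUNABLE or ETHTOOL_STUNABLE */
        tuna->id      = ETHTOOL_PFC_PREVENTION_TOUT;
        tuna->type_id = ETHTOOL_TUNABLE_U16;
        tuna->len     = sizeof(__u16);
        memcpy(tuna->data, val, sizeof(__u16));

        ifr->ifr_data = (void *)tuna;
        err = ioctl(fd, SIOCETHTOOL, ifr);
        if (!err)
                memcpy(val, tuna->data, sizeof(__u16));
        free(tuna);
        return err;
}

int main(void)
{
        struct ifreq ifr = {0};
        __u16 timeout = PFC_STORM_PREVENTION_AUTO; /* mapped to 100 msec by the driver */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (tunable_ioctl(fd, &ifr, ETHTOOL_STUNABLE, &timeout))
                perror("ETHTOOL_STUNABLE");
        if (!tunable_ioctl(fd, &ifr, ETHTOOL_GTUNABLE, &timeout))
                printf("pfc-prevention-tout: %u msec\n", timeout);

        close(fd);
        return 0;
}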
@@ -359,6 +359,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
         case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
         case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
         case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+        case MLX5_CMD_OP_QUERY_VNIC_ENV:
         case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
         case MLX5_CMD_OP_ALLOC_Q_COUNTER:
         case MLX5_CMD_OP_QUERY_Q_COUNTER:
@@ -501,6 +502,7 @@ const char *mlx5_command_str(int command)
         MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
         MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
         MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+        MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
         MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
         MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
         MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
@@ -1802,7 +1804,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
         cmd->checksum_disabled = 1;
         cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
-        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+        cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
 
         cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
         if (cmd->cmdif_rev > CMD_IF_REV) {
@@ -136,6 +136,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
         {MLX5_FLOW_CONTEXT_ACTION_ENCAP,         "ENCAP"},\
         {MLX5_FLOW_CONTEXT_ACTION_DECAP,         "DECAP"},\
         {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR,       "MOD_HDR"},\
+        {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH,     "VLAN_PUSH"},\
+        {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP,      "VLAN_POP"},\
         {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
 
 TRACE_EVENT(mlx5_fs_set_fte,
@@ -781,7 +781,8 @@ struct mlx5e_priv {
         struct net_device         *netdev;
         struct mlx5e_stats         stats;
         struct hwtstamp_config     tstamp;
         u16                        q_counter;
+        u16                        drop_rq_q_counter;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
         struct mlx5e_dcbx          dcbx;
 #endif
@@ -1061,7 +1062,6 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
 int mlx5e_close(struct net_device *netdev);
 int mlx5e_open(struct net_device *netdev);
 void mlx5e_update_stats_work(struct work_struct *work);
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 
 int mlx5e_bits_invert(unsigned long a, int size);
 
@@ -203,9 +203,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 {
         int i, idx = 0;
 
-        if (!data)
-                return;
-
         mutex_lock(&priv->state_lock);
         mlx5e_update_stats(priv);
         mutex_unlock(&priv->state_lock);
@@ -1066,6 +1063,57 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
         return err;
 }
 
+#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC         100
+#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC          8000
+#define MLX5E_PFC_PREVEN_MINOR_PRECENT          85
+#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC          80
+#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \
+        max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \
+              (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100)
+
+static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev,
+                                         u16 *pfc_prevention_tout)
+{
+        struct mlx5e_priv *priv    = netdev_priv(netdev);
+        struct mlx5_core_dev *mdev = priv->mdev;
+
+        if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+            !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+                return -EOPNOTSUPP;
+
+        return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL);
+}
+
+static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev,
+                                         u16 pfc_preven)
+{
+        struct mlx5e_priv *priv    = netdev_priv(netdev);
+        struct mlx5_core_dev *mdev = priv->mdev;
+        u16 critical_tout;
+        u16 minor;
+
+        if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+            !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+                return -EOPNOTSUPP;
+
+        critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ?
+                        MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC :
+                        pfc_preven;
+
+        if (critical_tout != PFC_STORM_PREVENTION_DISABLE &&
+            (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC ||
+             critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) {
+                netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n",
+                            __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC,
+                            MLX5E_PFC_PREVEN_TOUT_MAX_MSEC);
+                return -EINVAL;
+        }
+
+        minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout);
+        return mlx5_set_port_stall_watermark(mdev, critical_tout,
+                                             minor);
+}
+
 static int mlx5e_get_tunable(struct net_device *dev,
                              const struct ethtool_tunable *tuna,
                              void *data)
@@ -1077,6 +1125,9 @@ static int mlx5e_get_tunable(struct net_device *dev,
         case ETHTOOL_TX_COPYBREAK:
                 *(u32 *)data = priv->channels.params.tx_max_inline;
                 break;
+        case ETHTOOL_PFC_PREVENTION_TOUT:
+                err = mlx5e_get_pfc_prevention_tout(dev, data);
+                break;
         default:
                 err = -EINVAL;
                 break;
@@ -1118,6 +1169,9 @@ static int mlx5e_set_tunable(struct net_device *dev,
                         break;
                 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
+                break;
+        case ETHTOOL_PFC_PREVENTION_TOUT:
+                err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data);
                 break;
         default:
                 err = -EINVAL;
@@ -615,8 +615,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                  int next_state)
 {
-        struct mlx5e_channel *c = rq->channel;
-        struct mlx5_core_dev *mdev = c->mdev;
+        struct mlx5_core_dev *mdev = rq->mdev;
 
         void *in;
         void *rqc;
@@ -1768,14 +1767,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                 param->wq.linear = 1;
         }
 
-static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
                                       struct mlx5e_rq_param *param)
 {
+        struct mlx5_core_dev *mdev = priv->mdev;
         void *rqc = param->rqc;
         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
         MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
         MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+        MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
 
         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
 }
@@ -2643,15 +2644,16 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
         return mlx5e_alloc_cq_common(mdev, param, cq);
 }
 
-static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
                               struct mlx5e_rq *drop_rq)
 {
+        struct mlx5_core_dev *mdev = priv->mdev;
         struct mlx5e_cq_param cq_param = {};
         struct mlx5e_rq_param rq_param = {};
         struct mlx5e_cq *cq = &drop_rq->cq;
         int err;
 
-        mlx5e_build_drop_rq_param(mdev, &rq_param);
+        mlx5e_build_drop_rq_param(priv, &rq_param);
 
         err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
         if (err)
@@ -2669,6 +2671,10 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
         if (err)
                 goto err_free_rq;
 
+        err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+        if (err)
+                mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
+
         return 0;
 
 err_free_rq:
@@ -3236,24 +3242,20 @@ static int mlx5e_set_features(struct net_device *netdev,
                               netdev_features_t features)
 {
         netdev_features_t oper_features = netdev->features;
-        int err;
+        int err = 0;
 
-        err = mlx5e_handle_feature(netdev, &oper_features, features,
-                                   NETIF_F_LRO, set_feature_lro);
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_HW_VLAN_CTAG_FILTER,
+#define MLX5E_HANDLE_FEATURE(feature, handler) \
+        mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                     set_feature_cvlan_filter);
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_HW_TC, set_feature_tc_num_filters);
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_RXALL, set_feature_rx_all);
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_RXFCS, set_feature_rx_fcs);
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
 #ifdef CONFIG_RFS_ACCEL
-        err |= mlx5e_handle_feature(netdev, &oper_features, features,
-                                    NETIF_F_NTUPLE, set_feature_arfs);
+        err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
 #endif
 
         if (err) {
@@ -3961,7 +3963,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
 }
 
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
 {
         int i;
 
@@ -4104,6 +4106,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         netdev->vlan_features    |= NETIF_F_RXCSUM;
         netdev->vlan_features    |= NETIF_F_RXHASH;
 
+        netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
+        netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;
+
         if (!!MLX5_CAP_ETH(mdev, lro_cap))
                 netdev->vlan_features    |= NETIF_F_LRO;
 
@@ -4183,7 +4188,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         mlx5e_ipsec_build_netdev(priv);
 }
 
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
 {
         struct mlx5_core_dev *mdev = priv->mdev;
         int err;
@@ -4193,14 +4198,21 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
                 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
                 priv->q_counter = 0;
         }
+
+        err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
+        if (err) {
+                mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
+                priv->drop_rq_q_counter = 0;
+        }
 }
 
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
 {
-        if (!priv->q_counter)
-                return;
+        if (priv->q_counter)
+                mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
 
-        mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+        if (priv->drop_rq_q_counter)
+                mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
 }
 
 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -4439,18 +4451,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
         if (err)
                 goto out;
 
-        err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
+        mlx5e_create_q_counters(priv);
+
+        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
         if (err) {
                 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-                goto err_cleanup_tx;
+                goto err_destroy_q_counters;
         }
 
         err = profile->init_rx(priv);
         if (err)
                 goto err_close_drop_rq;
 
-        mlx5e_create_q_counter(priv);
-
         if (profile->enable)
                 profile->enable(priv);
 
@@ -4459,7 +4471,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 err_close_drop_rq:
         mlx5e_close_drop_rq(&priv->drop_rq);
 
-err_cleanup_tx:
+err_destroy_q_counters:
+        mlx5e_destroy_q_counters(priv);
         profile->cleanup_tx(priv);
 
 out:
@@ -4476,9 +4489,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
                 profile->disable(priv);
         flush_workqueue(priv->wq);
 
-        mlx5e_destroy_q_counter(priv);
         profile->cleanup_rx(priv);
         mlx5e_close_drop_rq(&priv->drop_rq);
+        mlx5e_destroy_q_counters(priv);
         profile->cleanup_tx(priv);
         cancel_delayed_work_sync(&priv->update_stats_work);
 }
@@ -170,11 +170,24 @@ static const struct counter_desc q_stats_desc[] = {
         { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
 };
 
+static const struct counter_desc drop_rq_stats_desc[] = {
+        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
+};
+
 #define NUM_Q_COUNTERS                  ARRAY_SIZE(q_stats_desc)
+#define NUM_DROP_RQ_COUNTERS            ARRAY_SIZE(drop_rq_stats_desc)
 
 static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
 {
-        return priv->q_counter ? NUM_Q_COUNTERS : 0;
+        int num_stats = 0;
+
+        if (priv->q_counter)
+                num_stats += NUM_Q_COUNTERS;
+
+        if (priv->drop_rq_q_counter)
+                num_stats += NUM_DROP_RQ_COUNTERS;
+
+        return num_stats;
 }
 
 static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
@@ -182,7 +195,13 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
         int i;
 
         for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
-                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       q_stats_desc[i].format);
+
+        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       drop_rq_stats_desc[i].format);
+
         return idx;
 }
 
@@ -191,7 +210,11 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
         int i;
 
         for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
-                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+                                                   q_stats_desc, i);
+        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+                                                   drop_rq_stats_desc, i);
         return idx;
 }
 
@@ -199,16 +222,76 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
 {
         struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
         u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
-        int err;
 
-        if (!priv->q_counter)
+        if (priv->q_counter &&
+            !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
+                                       sizeof(out)))
+                qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+                                                  out, out_of_buffer);
+        if (priv->drop_rq_q_counter &&
+            !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
+                                       out, sizeof(out)))
+                qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
+                                                    out_of_buffer);
+}
+
+#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
+static const struct counter_desc vnic_env_stats_desc[] = {
+        { "rx_steer_missed_packets",
+          VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
+};
+
+#define NUM_VNIC_ENV_COUNTERS           ARRAY_SIZE(vnic_env_stats_desc)
+
+static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+{
+        return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
+                NUM_VNIC_ENV_COUNTERS : 0;
+}
+
+static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
+                                           int idx)
+{
+        int i;
+
+        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+                return idx;
+
+        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       vnic_env_stats_desc[i].format);
+        return idx;
+}
+
+static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
+                                         int idx)
+{
+        int i;
+
+        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+                return idx;
+
+        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+                                                  vnic_env_stats_desc, i);
+        return idx;
+}
+
+static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+{
+        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
+        int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
+        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+        struct mlx5_core_dev *mdev = priv->mdev;
+
+        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                 return;
 
-        err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
-        if (err)
-                return;
-
-        qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+        MLX5_SET(query_vnic_env_in, in, opcode,
+                 MLX5_CMD_OP_QUERY_VNIC_ENV);
+        MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+        MLX5_SET(query_vnic_env_in, in, other_vport, 0);
+        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -754,7 +837,15 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
         { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
+static const struct counter_desc pport_pfc_stall_stats_desc[] = {
+        { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
+};
+
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS         ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_PFC_STALL_COUNTERS(priv)      (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
+                                                 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
+                                                 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
 
 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 {
@@ -790,7 +881,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
 {
         return (mlx5e_query_global_pause_combined(priv) +
                 hweight8(mlx5e_query_pfc_combined(priv))) *
-                NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
+                NUM_PPORT_PFC_STALL_COUNTERS(priv);
 }
 
 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
@@ -818,6 +910,10 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
                 }
         }
 
+        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       pport_pfc_stall_stats_desc[i].format);
+
         return idx;
 }
 
@@ -845,6 +941,10 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
                 }
         }
 
+        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+                                                  pport_pfc_stall_stats_desc, i);
+
         return idx;
 }
 
@@ -1094,6 +1194,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
                 .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                 .update_stats = mlx5e_grp_q_update_stats,
         },
+        {
+                .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
+                .fill_strings = mlx5e_grp_vnic_env_fill_strings,
+                .fill_stats = mlx5e_grp_vnic_env_fill_stats,
+                .update_stats = mlx5e_grp_vnic_env_update_stats,
+        },
         {
                 .get_num_stats = mlx5e_grp_vport_get_num_stats,
                 .fill_strings = mlx5e_grp_vport_fill_strings,
@@ -97,6 +97,11 @@ struct mlx5e_sw_stats {
 
 struct mlx5e_qcounter_stats {
         u32 rx_out_of_buffer;
+        u32 rx_if_down_packets;
+};
+
+struct mlx5e_vnic_env_stats {
+        __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
 };
 
 #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
@@ -201,6 +206,7 @@ struct mlx5e_ch_stats {
 struct mlx5e_stats {
         struct mlx5e_sw_stats sw;
         struct mlx5e_qcounter_stats qcnt;
+        struct mlx5e_vnic_env_stats vnic;
         struct mlx5e_vport_stats vport;
         struct mlx5e_pport_stats pport;
         struct rtnl_link_stats64 vf_vport;
@@ -2530,12 +2530,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                         if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                         } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
-                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
-                                    tcf_vlan_push_prio(a))
-                                        return -EOPNOTSUPP;
-
                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
-                                attr->vlan = tcf_vlan_push_vid(a);
+                                attr->vlan_vid = tcf_vlan_push_vid(a);
+                                if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
+                                        attr->vlan_prio = tcf_vlan_push_prio(a);
+                                        attr->vlan_proto = tcf_vlan_push_proto(a);
+                                        if (!attr->vlan_proto)
+                                                attr->vlan_proto = htons(ETH_P_8021Q);
+                                } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+                                           tcf_vlan_push_prio(a)) {
+                                        return -EOPNOTSUPP;
+                                }
                         } else { /* action is TCA_VLAN_ACT_MODIFY */
                                 return -EOPNOTSUPP;
                         }
@@ -2096,17 +2096,19 @@ unlock:
         return err;
 }
 
-static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
                                                 int vport_idx,
                                                 struct mlx5_vport_drop_stats *stats)
 {
         struct mlx5_eswitch *esw = dev->priv.eswitch;
         struct mlx5_vport *vport = &esw->vports[vport_idx];
+        u64 rx_discard_vport_down, tx_discard_vport_down;
         u64 bytes = 0;
         u16 idx = 0;
+        int err = 0;
 
         if (!vport->enabled || esw->mode != SRIOV_LEGACY)
-                return;
+                return 0;
 
         if (vport->egress.drop_counter) {
                 idx = vport->egress.drop_counter->id;
@@ -2117,6 +2119,23 @@ static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
                 idx = vport->ingress.drop_counter->id;
                 mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
         }
+
+        if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+            !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+                return 0;
+
+        err = mlx5_query_vport_down_stats(dev, vport_idx,
+                                          &rx_discard_vport_down,
+                                          &tx_discard_vport_down);
+        if (err)
+                return err;
+
+        if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+                stats->rx_dropped += rx_discard_vport_down;
+        if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+                stats->tx_dropped += tx_discard_vport_down;
+
+        return 0;
 }
 
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -2180,7 +2199,9 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
         vf_stats->broadcast =
                 MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
-        mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+        err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+        if (err)
+                goto free_out;
         vf_stats->rx_dropped = stats.rx_dropped;
         vf_stats->tx_dropped = stats.tx_dropped;
 
@@ -227,15 +227,14 @@ enum {
         SET_VLAN_INSERT = BIT(1)
 };
 
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x4000
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
-
 struct mlx5_esw_flow_attr {
         struct mlx5_eswitch_rep *in_rep;
         struct mlx5_eswitch_rep *out_rep;
 
         int     action;
-        u16     vlan;
+        __be16  vlan_proto;
+        u16     vlan_vid;
+        u8      vlan_prio;
         bool    vlan_handled;
         u32     encap_id;
         u32     mod_hdr_id;
@@ -258,6 +257,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                   int vport, u16 vlan, u8 qos, u8 set_flags);
 
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+{
+        return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+               MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+}
+
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
 
 #define esw_info(dev, format, ...) \
@@ -58,8 +58,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         if (esw->mode != SRIOV_OFFLOADS)
                 return ERR_PTR(-EOPNOTSUPP);
 
-        /* per flow vlan pop/push is emulated, don't set that into the firmware */
-        flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+        flow_act.action = attr->action;
+        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
+        if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+                flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+                                     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+        else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+                flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
+                flow_act.vlan.vid = attr->vlan_vid;
+                flow_act.vlan.prio = attr->vlan_prio;
+        }
 
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -88,10 +96,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
-        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                 flow_act.modify_id = attr->mod_hdr_id;
 
-        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                 flow_act.encap_id = attr->encap_id;
 
         rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
@@ -185,7 +193,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
         /* protects against (1) setting rules with different vlans to push and
          * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
          */
-        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
                 goto out_notsupp;
 
         return 0;
@@ -202,6 +210,10 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
         bool push, pop, fwd;
         int err = 0;
 
+        /* nop if we're on the vlan push/pop non emulation mode */
+        if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+                return 0;
+
         push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
         pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
         fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
@@ -239,11 +251,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                 if (vport->vlan_refcount)
                         goto skip_set_push;
 
-                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
                                                     SET_VLAN_INSERT | SET_VLAN_STRIP);
                 if (err)
                         goto out;
-                vport->vlan = attr->vlan;
+                vport->vlan = attr->vlan_vid;
 skip_set_push:
                 vport->vlan_refcount++;
         }
@@ -261,6 +273,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
         bool push, pop, fwd;
         int err = 0;
 
+        /* nop if we're on the vlan push/pop non emulation mode */
+        if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+                return 0;
+
         if (!attr->vlan_handled)
                 return 0;
 
@@ -317,7 +317,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
         u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
         struct mlx5_flow_rule *dst;
-        void *in_flow_context;
+        void *in_flow_context, *vlan;
         void *in_match_value;
         void *in_dests;
         u32 *in;
@@ -340,11 +340,19 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 
         in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
         MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
         MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
         MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
         MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
         MLX5_SET(flow_context, in_flow_context, modify_header_id,
                  fte->action.modify_id);
+
+        vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+        MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
+        MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
+        MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
+
         in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
                                       match_value);
         memcpy(in_match_value, &fte->val, sizeof(fte->val));
@@ -1439,7 +1439,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
         if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
                              MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                              MLX5_FLOW_CONTEXT_ACTION_DECAP |
-                             MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
+                             MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
+                             MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+                             MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
                 return true;
 
         return false;
@@ -183,6 +183,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                         return err;
         }
 
+        if (MLX5_CAP_GEN(dev, debug))
+                mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
+
         if (MLX5_CAP_GEN(dev, pcam_reg))
                 mlx5_get_pcam_reg(dev);
 
@@ -483,6 +483,17 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
 
+static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
+                               u32 out_size)
+{
+        u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+
+        MLX5_SET(pfcc_reg, in, local_port, 1);
+
+        return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                    out_size, MLX5_REG_PFCC, 0, 0);
+}
+
 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
 {
         u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -500,13 +511,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
 int mlx5_query_port_pause(struct mlx5_core_dev *dev,
                           u32 *rx_pause, u32 *tx_pause)
 {
-        u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
         u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
         int err;
 
-        MLX5_SET(pfcc_reg, in, local_port, 1);
-        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-                                   sizeof(out), MLX5_REG_PFCC, 0, 0);
+        err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
         if (err)
                 return err;
 
@@ -520,6 +528,49 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
 
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+                                  u16 stall_critical_watermark,
+                                  u16 stall_minor_watermark)
+{
+        u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+        u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+        MLX5_SET(pfcc_reg, in, local_port, 1);
+        MLX5_SET(pfcc_reg, in, pptx_mask_n, 1);
+        MLX5_SET(pfcc_reg, in, pprx_mask_n, 1);
+        MLX5_SET(pfcc_reg, in, ppan_mask_n, 1);
+        MLX5_SET(pfcc_reg, in, critical_stall_mask, 1);
+        MLX5_SET(pfcc_reg, in, minor_stall_mask, 1);
+        MLX5_SET(pfcc_reg, in, device_stall_critical_watermark,
+                 stall_critical_watermark);
+        MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, stall_minor_watermark);
+
+        return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                    sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+                                    u16 *stall_critical_watermark,
+                                    u16 *stall_minor_watermark)
+{
+        u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+        int err;
+
+        err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
+        if (err)
+                return err;
+
+        if (stall_critical_watermark)
+                *stall_critical_watermark = MLX5_GET(pfcc_reg, out,
+                                                     device_stall_critical_watermark);
+
+        if (stall_minor_watermark)
+                *stall_minor_watermark = MLX5_GET(pfcc_reg, out,
+                                                  device_stall_minor_watermark);
+
+        return 0;
+}
+
 int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
 {
         u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -538,13 +589,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
 
 int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
 {
-        u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
         u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
         int err;
 
-        MLX5_SET(pfcc_reg, in, local_port, 1);
-        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-                                   sizeof(out), MLX5_REG_PFCC, 0, 0);
+        err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
         if (err)
                 return err;
 
@@ -1070,6 +1070,32 @@ free:
 }
 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
 
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+                                u64 *rx_discard_vport_down,
+                                u64 *tx_discard_vport_down)
+{
+        u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
+        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+        int err;
+
+        MLX5_SET(query_vnic_env_in, in, opcode,
+                 MLX5_CMD_OP_QUERY_VNIC_ENV);
+        MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+        MLX5_SET(query_vnic_env_in, in, vport_number, vport);
+        if (vport)
+                MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+        if (err)
+                return err;
+
+        *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+                                            vport_env.receive_discard_vport_down);
+        *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+                                            vport_env.transmit_discard_vport_down);
+        return 0;
+}
+
 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
                                        u8 other_vport, u8 port_num,
                                        int vf,
|
@ -1013,6 +1013,7 @@ enum mlx5_cap_type {
|
|||||||
MLX5_CAP_RESERVED,
|
MLX5_CAP_RESERVED,
|
||||||
MLX5_CAP_VECTOR_CALC,
|
MLX5_CAP_VECTOR_CALC,
|
||||||
MLX5_CAP_QOS,
|
MLX5_CAP_QOS,
|
||||||
|
MLX5_CAP_DEBUG,
|
||||||
/* NUM OF CAP Types */
|
/* NUM OF CAP Types */
|
||||||
MLX5_CAP_NUM
|
MLX5_CAP_NUM
|
||||||
};
|
};
|
||||||
@ -1140,6 +1141,9 @@ enum mlx5_qcam_feature_groups {
|
|||||||
#define MLX5_CAP_QOS(mdev, cap)\
|
#define MLX5_CAP_QOS(mdev, cap)\
|
||||||
MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
|
MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
|
||||||
|
|
||||||
|
#define MLX5_CAP_DEBUG(mdev, cap)\
|
||||||
|
MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
|
||||||
|
|
||||||
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
|
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
|
||||||
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
|
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
|
||||||
|
|
||||||
|
@@ -142,6 +142,12 @@ struct mlx5_flow_group *
 mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
 
+struct mlx5_fs_vlan {
+        u16 ethtype;
+        u16 vid;
+        u8  prio;
+};
+
 struct mlx5_flow_act {
         u32 action;
         bool has_flow_tag;
@@ -149,6 +155,7 @@ struct mlx5_flow_act {
         u32 encap_id;
         u32 modify_id;
         uintptr_t esp_id;
+        struct mlx5_fs_vlan vlan;
 };
 
 #define MLX5_DECLARE_FLOW_ACT(name) \
@ -143,6 +143,7 @@ enum {
|
|||||||
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
|
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
|
||||||
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
|
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
|
||||||
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
|
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
|
||||||
|
MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
|
||||||
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
|
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
|
||||||
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
|
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
|
||||||
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
|
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
|
||||||
@ -313,7 +314,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
|
|||||||
u8 flow_table_modify[0x1];
|
u8 flow_table_modify[0x1];
|
||||||
u8 encap[0x1];
|
u8 encap[0x1];
|
||||||
u8 decap[0x1];
|
u8 decap[0x1];
|
||||||
u8 reserved_at_9[0x17];
|
u8 reserved_at_9[0x1];
|
||||||
|
u8 pop_vlan[0x1];
|
||||||
|
u8 push_vlan[0x1];
|
||||||
|
u8 reserved_at_c[0x14];
|
||||||
|
|
||||||
u8 reserved_at_20[0x2];
|
u8 reserved_at_20[0x2];
|
||||||
u8 log_max_ft_size[0x6];
|
u8 log_max_ft_size[0x6];
|
||||||
@ -593,6 +597,16 @@ struct mlx5_ifc_qos_cap_bits {
|
|||||||
u8 reserved_at_100[0x700];
|
u8 reserved_at_100[0x700];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct mlx5_ifc_debug_cap_bits {
|
||||||
|
u8 reserved_at_0[0x20];
|
||||||
|
|
||||||
|
u8 reserved_at_20[0x2];
|
||||||
|
u8 stall_detect[0x1];
|
||||||
|
u8 reserved_at_23[0x1d];
|
||||||
|
|
||||||
|
u8 reserved_at_40[0x7c0];
|
||||||
|
};
|
||||||
|
|
||||||
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
|
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
|
||||||
u8 csum_cap[0x1];
|
u8 csum_cap[0x1];
|
||||||
u8 vlan_cap[0x1];
|
u8 vlan_cap[0x1];
|
||||||
@ -855,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
|
|||||||
u8 out_of_seq_cnt[0x1];
|
u8 out_of_seq_cnt[0x1];
|
||||||
u8 vport_counters[0x1];
|
u8 vport_counters[0x1];
|
||||||
u8 retransmission_q_counters[0x1];
|
u8 retransmission_q_counters[0x1];
|
||||||
u8 reserved_at_183[0x1];
|
u8 debug[0x1];
|
||||||
u8 modify_rq_counter_set_id[0x1];
|
u8 modify_rq_counter_set_id[0x1];
|
||||||
u8 rq_delay_drop[0x1];
|
u8 rq_delay_drop[0x1];
|
||||||
u8 max_qp_cnt[0xa];
|
u8 max_qp_cnt[0xa];
|
||||||
@ -865,7 +879,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
|
|||||||
u8 vhca_group_manager[0x1];
|
u8 vhca_group_manager[0x1];
|
||||||
u8 ib_virt[0x1];
|
u8 ib_virt[0x1];
|
||||||
u8 eth_virt[0x1];
|
u8 eth_virt[0x1];
|
||||||
u8 reserved_at_1a4[0x1];
|
u8 vnic_env_queue_counters[0x1];
|
||||||
u8 ets[0x1];
|
u8 ets[0x1];
|
||||||
u8 nic_flow_table[0x1];
|
u8 nic_flow_table[0x1];
|
||||||
u8 eswitch_flow_table[0x1];
|
u8 eswitch_flow_table[0x1];
|
||||||
@ -997,7 +1011,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
|
|||||||
u8 reserved_at_330[0xb];
|
u8 reserved_at_330[0xb];
|
||||||
u8 log_max_xrcd[0x5];
|
u8 log_max_xrcd[0x5];
|
||||||
|
|
||||||
u8 reserved_at_340[0x8];
|
u8 nic_receive_steering_discard[0x1];
|
||||||
|
u8 receive_discard_vport_down[0x1];
|
||||||
|
u8 transmit_discard_vport_down[0x1];
|
||||||
|
u8 reserved_at_343[0x5];
|
||||||
u8 log_max_flow_counter_bulk[0x8];
|
u8 log_max_flow_counter_bulk[0x8];
|
||||||
u8 max_flow_counter_15_0[0x10];
|
u8 max_flow_counter_15_0[0x10];
|
||||||
|
|
||||||
@ -1572,7 +1589,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
|
|||||||
|
|
||||||
u8 rx_pause_transition_low[0x20];
|
u8 rx_pause_transition_low[0x20];
|
||||||
|
|
||||||
u8 reserved_at_3c0[0x400];
|
u8 reserved_at_3c0[0x40];
|
||||||
|
|
||||||
|
u8 device_stall_minor_watermark_cnt_high[0x20];
|
||||||
|
|
||||||
|
u8 device_stall_minor_watermark_cnt_low[0x20];
|
||||||
|
|
||||||
|
u8 device_stall_critical_watermark_cnt_high[0x20];
|
||||||
|
|
||||||
|
u8 device_stall_critical_watermark_cnt_low[0x20];
|
||||||
|
|
||||||
|
u8 reserved_at_480[0x340];
|
||||||
};
|
};
|
||||||
|
|
||||||
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
|
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
|
||||||
@ -2287,10 +2314,19 @@ enum {
|
|||||||
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
|
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
|
||||||
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
|
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
|
||||||
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
|
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
|
||||||
|
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
|
||||||
|
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct mlx5_ifc_vlan_bits {
|
||||||
|
u8 ethtype[0x10];
|
||||||
|
u8 prio[0x3];
|
||||||
|
u8 cfi[0x1];
|
||||||
|
u8 vid[0xc];
|
||||||
};
|
};
|
||||||
|
|
||||||
struct mlx5_ifc_flow_context_bits {
|
struct mlx5_ifc_flow_context_bits {
|
||||||
u8 reserved_at_0[0x20];
|
struct mlx5_ifc_vlan_bits push_vlan;
|
||||||
|
|
||||||
u8 group_id[0x20];
|
u8 group_id[0x20];
|
||||||
|
|
||||||
@@ -2366,6 +2402,24 @@ struct mlx5_ifc_xrc_srqc_bits {
 	u8 reserved_at_180[0x80];
 };

+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+	u8 counter_error_queues[0x20];
+
+	u8 total_error_queues[0x20];
+
+	u8 send_queue_priority_update_flow[0x20];
+
+	u8 reserved_at_60[0x20];
+
+	u8 nic_receive_steering_discard[0x40];
+
+	u8 receive_discard_vport_down[0x40];
+
+	u8 transmit_discard_vport_down[0x40];
+
+	u8 reserved_at_140[0xec0];
+};
+
 struct mlx5_ifc_traffic_counter_bits {
 	u8 packets[0x40];

@@ -3641,6 +3695,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
 	u8 reserved_at_60[0x20];
 };

+struct mlx5_ifc_query_vnic_env_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+
+	u8 syndrome[0x20];
+
+	u8 reserved_at_40[0x40];
+
+	struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+	MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+
+	u8 other_vport[0x1];
+	u8 reserved_at_41[0xf];
+	u8 vport_number[0x10];
+
+	u8 reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_query_vport_counter_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
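These two layouts carry the QUERY_VNIC_ENV command introduced by this series. A sketch of building the query and extracting one discard counter with MLX5_SET()/MLX5_GET64() and mlx5_cmd_exec(); the function below is only illustrative, the series' own helper is the mlx5_query_vport_down_stats() declared later in this diff:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int query_rx_discard(struct mlx5_core_dev *mdev, u16 vport, u64 *rx_discard)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod,
		 MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS);
	if (vport) {
		/* query another vport (e.g. a VF) rather than our own */
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);
		MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	}

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*rx_discard = MLX5_GET64(query_vnic_env_out, out,
				 vport_env.receive_discard_vport_down);
	return 0;
}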
@@ -7813,7 +7896,11 @@ struct mlx5_ifc_pifr_reg_bits {
 struct mlx5_ifc_pfcc_reg_bits {
 	u8 reserved_at_0[0x8];
 	u8 local_port[0x8];
-	u8 reserved_at_10[0x10];
+	u8 reserved_at_10[0xb];
+	u8 ppan_mask_n[0x1];
+	u8 minor_stall_mask[0x1];
+	u8 critical_stall_mask[0x1];
+	u8 reserved_at_1e[0x2];

 	u8 ppan[0x4];
 	u8 reserved_at_24[0x4];
@@ -7823,17 +7910,22 @@ struct mlx5_ifc_pfcc_reg_bits {

 	u8 pptx[0x1];
 	u8 aptx[0x1];
-	u8 reserved_at_42[0x6];
+	u8 pptx_mask_n[0x1];
+	u8 reserved_at_43[0x5];
 	u8 pfctx[0x8];
 	u8 reserved_at_50[0x10];

 	u8 pprx[0x1];
 	u8 aprx[0x1];
-	u8 reserved_at_62[0x6];
+	u8 pprx_mask_n[0x1];
+	u8 reserved_at_63[0x5];
 	u8 pfcrx[0x8];
 	u8 reserved_at_70[0x10];

-	u8 reserved_at_80[0x80];
+	u8 device_stall_minor_watermark[0x10];
+	u8 device_stall_critical_watermark[0x10];
+
+	u8 reserved_at_a0[0x60];
 };

 struct mlx5_ifc_pelc_reg_bits {
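The PFCC register gains the two stall watermark fields plus per-field mask bits. A sketch of a setter in the style of the existing PFCC helpers; the assumption is that the *_stall_mask bits mark which fields the write applies, so the PFC enable bits are left untouched:

#include <linux/mlx5/driver.h>

static int set_stall_watermarks(struct mlx5_core_dev *mdev,
				u16 critical, u16 minor)
{
	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];

	MLX5_SET(pfcc_reg, in, local_port, 1);
	/* assumed semantics: mask bits mark the watermark fields as valid */
	MLX5_SET(pfcc_reg, in, minor_stall_mask, 1);
	MLX5_SET(pfcc_reg, in, critical_stall_mask, 1);
	MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, minor);
	MLX5_SET(pfcc_reg, in, device_stall_critical_watermark, critical);

	return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_PFCC, 0, 1);
}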
@@ -7874,8 +7966,10 @@ struct mlx5_ifc_peir_reg_bits {
 };

 struct mlx5_ifc_pcam_enhanced_features_bits {
-	u8 reserved_at_0[0x7b];
+	u8 reserved_at_0[0x76];

+	u8 pfcc_mask[0x1];
+	u8 reserved_at_77[0x4];
 	u8 rx_buffer_fullness_counters[0x1];
 	u8 ptys_connector_type[0x1];
 	u8 reserved_at_7d[0x1];
@@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
 int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
                         u8 *pfc_en_rx);

+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+                                  u16 stall_critical_watermark,
+                                  u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+                                    u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+
 int mlx5_max_tc(struct mlx5_core_dev *mdev);

 int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
@@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,

 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+                                u64 *rx_discard_vport_down,
+                                u64 *tx_discard_vport_down);
 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
                                   int vf, u8 port_num, void *out,
                                   size_t out_sz);
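A sketch of where the new helper's output could land, e.g. folded into a netdev's rtnl_link_stats64 as RX/TX drops; the wrapper below is hypothetical and queries the function's own vport (vport 0):

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static void fold_vport_down_drops(struct mlx5_core_dev *mdev,
				  struct rtnl_link_stats64 *s)
{
	u64 rx_down = 0, tx_down = 0;

	/* account packets dropped while the logical link was down */
	if (!mlx5_query_vport_down_stats(mdev, 0, &rx_down, &tx_down)) {
		s->rx_dropped += rx_down;
		s->tx_dropped += tx_down;
	}
}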
@@ -217,10 +217,14 @@ struct ethtool_value {
 	__u32 data;
 };

+#define PFC_STORM_PREVENTION_AUTO 0xffff
+#define PFC_STORM_PREVENTION_DISABLE 0
+
 enum tunable_id {
 	ETHTOOL_ID_UNSPEC,
 	ETHTOOL_RX_COPYBREAK,
 	ETHTOOL_TX_COPYBREAK,
+	ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
 	/*
 	 * Add your fresh new tubale attribute above and remember to update
 	 * tunable_strings[] in net/core/ethtool.c
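Userspace reaches the new tunable through the generic ETHTOOL_STUNABLE ioctl path (validated in the net/core/ethtool.c hunks below). A small sketch, with the interface name taken from argv and PFC_STORM_PREVENTION_AUTO asking the driver to pick its default timeout; error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct {
		struct ethtool_tunable hdr;
		__u16 timeout_msec;		/* payload follows the header */
	} req = {
		.hdr = {
			.cmd     = ETHTOOL_STUNABLE,
			.id      = ETHTOOL_PFC_PREVENTION_TOUT,
			.type_id = ETHTOOL_TUNABLE_U16,
			.len     = sizeof(__u16),
		},
		.timeout_msec = PFC_STORM_PREVENTION_AUTO, /* or 0, or a value in msecs */
	};
	struct ifreq ifr = {0};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, argc > 1 ? argv[1] : "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&req;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_STUNABLE");
	if (fd >= 0)
		close(fd);
	return 0;
}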
@@ -121,6 +121,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
 	[ETHTOOL_ID_UNSPEC] = "Unspec",
 	[ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
 	[ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
+	[ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
 };

 static const char
@@ -2311,6 +2312,11 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
 		    tuna->type_id != ETHTOOL_TUNABLE_U32)
 			return -EINVAL;
 		break;
+	case ETHTOOL_PFC_PREVENTION_TOUT:
+		if (tuna->len != sizeof(u16) ||
+		    tuna->type_id != ETHTOOL_TUNABLE_U16)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}