Merge tag 'mlx5e-failsafe' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-failsafe 27-03-2017

This series provides a fail-safe mechanism to allow safely re-configuring
the mlx5e netdevice, and provides resiliency against sporadic
configuration failures.

To enable this we do some refactoring and code reorganization to allow
breaking the driver's open/close flows into stages:
      open -> activate -> deactivate -> close.

In addition we need to allow creating fresh HW ring resources
(mlx5e_channels) with their own "new" set of parameters, while keeping
the current ones running and active until the new channels are
successfully created with the new configuration; only then can we
safely replace (switch) the old channels with the new ones.

For that we introduce the mlx5e_channels object and an API to manage it:
 - channels = open_channels(new_params):
   open fresh TX/RX channels
 - activate_channels(channels):
   redirect traffic to them and attach them to the netdev
 - deactivate_channels(channels):
   stop traffic and detach from the netdev
 - close_channels(channels):
   free the TX/RX HW resources of those channels

With the above strategy it is straightforward to achieve the desired
behavior of fail-safe configuration. In pseudocode:

make_new_config(new_params)
{
        old_channels = current_active_channels;
        new_channels = create_channels(new_params);
        if (!new_channels)
                return "Failed, but current channels are still active :)";

        deactivate_channels(old_channels); /* Can't fail */
        set_hw_new_state();                /* If needed  */
        activate_channels(new_channels);   /* Can't fail */
        close_channels(old_channels);
        current_active_channels = new_channels;

        return "SUCCESS";
}

At the top of this series, we change the following flows to be fail-safe:
ethtool:
   - ring parameters
   - coalesce parameters
   - tx copy break parameters
   - cqe compressing/moderation mode setting (priv flags)
ndos:
   - tc setup
   - set features: LRO
   - change mtu
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
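The pseudocode above maps directly onto the new API. Below is a rough C
sketch of what each converted handler ends up doing; the helper name
mlx5e_update_params is hypothetical, while mlx5e_open_channels() and
mlx5e_switch_priv_channels() are the entry points introduced by the series,
and priv->state_lock is assumed to be held by the caller:

/* Hypothetical helper illustrating the fail-safe pattern; not part of
 * the series itself. Caller holds priv->state_lock.
 */
static int mlx5e_update_params(struct mlx5e_priv *priv,
                               struct mlx5e_params *new_params,
                               mlx5e_fp_hw_modify hw_modify)
{
        struct mlx5e_channels new_channels = {};
        int err;

        new_channels.params = *new_params;

        /* Device is down: just record the parameters for the next open */
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
                return 0;
        }

        /* Create the new channels first; on failure the current channels
         * are untouched and keep carrying traffic.
         */
        err = mlx5e_open_channels(priv, &new_channels);
        if (err)
                return err;

        /* Deactivate old channels, apply HW state via hw_modify (if any),
         * activate the new channels and free the old ones.
         */
        mlx5e_switch_priv_channels(priv, &new_channels, hw_modify);
        return 0;
}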
commit cc628c9680
David S. Miller <davem@davemloft.net>, 2017-03-27 21:16:03 -07:00
12 changed files with 1039 additions and 844 deletions

@@ -182,15 +182,15 @@ enum mlx5e_priv_flag {
         MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
 };
 
-#define MLX5E_SET_PFLAG(priv, pflag, enable) \
+#define MLX5E_SET_PFLAG(params, pflag, enable) \
         do { \
                 if (enable) \
-                        (priv)->params.pflags |= (pflag); \
+                        (params)->pflags |= (pflag); \
                 else \
-                        (priv)->params.pflags &= ~(pflag); \
+                        (params)->pflags &= ~(pflag); \
         } while (0)
 
-#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -213,7 +213,6 @@ struct mlx5e_params {
         bool rx_cqe_compress_def;
         struct mlx5e_cq_moder rx_cq_moderation;
         struct mlx5e_cq_moder tx_cq_moderation;
-        u16 min_rx_wqes;
         bool lro_en;
         u32 lro_wqe_sz;
         u16 tx_max_inline;
@@ -225,6 +224,7 @@ struct mlx5e_params {
         bool rx_am_enabled;
         u32 lro_timeout;
         u32 pflags;
+        struct bpf_prog *xdp_prog;
 };
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -280,7 +280,6 @@ struct mlx5e_cq {
         struct napi_struct *napi;
         struct mlx5_core_cq mcq;
         struct mlx5e_channel *channel;
-        struct mlx5e_priv *priv;
 
         /* cqe decompression */
         struct mlx5_cqe64 title;
@@ -290,6 +289,7 @@ struct mlx5e_cq {
         u16 decmprs_wqe_counter;
 
         /* control */
+        struct mlx5_core_dev *mdev;
         struct mlx5_frag_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;
@@ -357,7 +357,7 @@ struct mlx5e_txqsq {
         /* control path */
         struct mlx5_wq_ctrl wq_ctrl;
         struct mlx5e_channel *channel;
-        int tc;
+        int txq_ix;
         u32 rate_limit;
 } ____cacheline_aligned_in_smp;
@@ -533,7 +533,7 @@ struct mlx5e_rq {
         u32 mpwqe_num_strides;
         u32 rqn;
         struct mlx5e_channel *channel;
-        struct mlx5e_priv *priv;
+        struct mlx5_core_dev *mdev;
         struct mlx5_core_mkey umr_mkey;
 } ____cacheline_aligned_in_smp;
@@ -556,10 +556,18 @@ struct mlx5e_channel {
         /* control */
         struct mlx5e_priv *priv;
+        struct mlx5_core_dev *mdev;
+        struct mlx5e_tstamp *tstamp;
         int ix;
         int cpu;
 };
 
+struct mlx5e_channels {
+        struct mlx5e_channel **c;
+        unsigned int num;
+        struct mlx5e_params params;
+};
+
 enum mlx5e_traffic_types {
         MLX5E_TT_IPV4_TCP,
         MLX5E_TT_IPV6_TCP,
@@ -709,34 +717,17 @@ enum {
         MLX5E_NIC_PRIO
 };
 
-struct mlx5e_profile {
-        void (*init)(struct mlx5_core_dev *mdev,
-                     struct net_device *netdev,
-                     const struct mlx5e_profile *profile, void *ppriv);
-        void (*cleanup)(struct mlx5e_priv *priv);
-        int (*init_rx)(struct mlx5e_priv *priv);
-        void (*cleanup_rx)(struct mlx5e_priv *priv);
-        int (*init_tx)(struct mlx5e_priv *priv);
-        void (*cleanup_tx)(struct mlx5e_priv *priv);
-        void (*enable)(struct mlx5e_priv *priv);
-        void (*disable)(struct mlx5e_priv *priv);
-        void (*update_stats)(struct mlx5e_priv *priv);
-        int (*max_nch)(struct mlx5_core_dev *mdev);
-        int max_tc;
-};
-
 struct mlx5e_priv {
         /* priv data path fields - start */
-        struct mlx5e_txqsq **txq_to_sq_map;
-        int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
-        struct bpf_prog *xdp_prog;
+        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
+        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
         /* priv data path fields - end */
 
         unsigned long state;
         struct mutex state_lock; /* Protects Interface state */
         struct mlx5e_rq drop_rq;
 
-        struct mlx5e_channel **channel;
+        struct mlx5e_channels channels;
         u32 tisn[MLX5E_MAX_NUM_TC];
         struct mlx5e_rqt indir_rqt;
         struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -746,7 +737,6 @@ struct mlx5e_priv {
         struct mlx5e_flow_steering fs;
         struct mlx5e_vxlan_db vxlan;
 
-        struct mlx5e_params params;
         struct workqueue_struct *wq;
         struct work_struct update_carrier_work;
         struct work_struct set_rx_mode_work;
@@ -766,6 +756,22 @@ struct mlx5e_priv {
         void *ppriv;
 };
 
+struct mlx5e_profile {
+        void (*init)(struct mlx5_core_dev *mdev,
+                     struct net_device *netdev,
+                     const struct mlx5e_profile *profile, void *ppriv);
+        void (*cleanup)(struct mlx5e_priv *priv);
+        int (*init_rx)(struct mlx5e_priv *priv);
+        void (*cleanup_rx)(struct mlx5e_priv *priv);
+        int (*init_tx)(struct mlx5e_priv *priv);
+        void (*cleanup_tx)(struct mlx5e_priv *priv);
+        void (*enable)(struct mlx5e_priv *priv);
+        void (*disable)(struct mlx5e_priv *priv);
+        void (*update_stats)(struct mlx5e_priv *priv);
+        int (*max_nch)(struct mlx5_core_dev *mdev);
+        int max_tc;
+};
+
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -827,7 +833,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
                              struct ptp_clock_event *event);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
@@ -836,14 +842,40 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd);
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
-                                    enum mlx5e_traffic_types tt);
+struct mlx5e_redirect_rqt_param {
+        bool is_rss;
+        union {
+                u32 rqn; /* Direct RQN (Non-RSS) */
+                struct {
+                        u8 hfunc;
+                        struct mlx5e_channels *channels;
+                } rss; /* RSS data */
+        };
+};
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+                       struct mlx5e_redirect_rqt_param rrp);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+                                    enum mlx5e_traffic_types tt,
+                                    void *tirc);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
+
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+                        struct mlx5e_channels *chs);
+void mlx5e_close_channels(struct mlx5e_channels *chs);
+
+/* Function pointer to be used to modify WH settings while
+ * switching channels
+ */
+typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+                                struct mlx5e_channels *new_chs,
+                                mlx5e_fp_hw_modify hw_modify);
+
 void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
                                    u32 *indirection_rqt, int len,
                                    int num_channels);
@@ -851,7 +883,8 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                  u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params, u8 rq_type);
 
 static inline
 struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
@@ -942,8 +975,7 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                        struct mlx5e_tir *tir);
 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-                                     bool enable_uc_lb);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
 
 struct mlx5_eswitch_rep;
 int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
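Aside: given only the declarations above, a hedged sketch of how the two arms
of mlx5e_redirect_rqt_param are meant to be filled (the RSS arm mirrors the
mlx5e_set_rxfh hunk further down; rqtn and my_rqn are placeholders):

        /* RSS: spread the indirection table over the active channels */
        struct mlx5e_redirect_rqt_param rss_rrp = {
                .is_rss = true,
                .rss.hfunc = priv->channels.params.rss_hfunc,
                .rss.channels = &priv->channels,
        };

        /* Non-RSS: point every RQT entry at a single RQ */
        struct mlx5e_redirect_rqt_param direct_rrp = {
                .is_rss = false,
                .rqn = my_rqn,
        };

        mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rss_rrp);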


@@ -90,6 +90,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         struct hwtstamp_config config;
+        int err;
 
         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
                 return -EOPNOTSUPP;
@@ -111,7 +112,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
         switch (config.rx_filter) {
         case HWTSTAMP_FILTER_NONE:
                 /* Reset CQE compression to Admin default */
-                mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
+                mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
                 break;
         case HWTSTAMP_FILTER_ALL:
         case HWTSTAMP_FILTER_SOME:
@@ -129,7 +130,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                 /* Disable CQE compression */
                 netdev_warn(dev, "Disabling cqe compression");
-                mlx5e_modify_rx_cqe_compression_locked(priv, false);
+                err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+                if (err) {
+                        netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+                        mutex_unlock(&priv->state_lock);
+                        return err;
+                }
                 config.rx_filter = HWTSTAMP_FILTER_ALL;
                 break;
         default:


@@ -136,18 +136,20 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
         mlx5_core_dealloc_pd(mdev, res->pdn);
 }
 
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-                                     bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
+        struct mlx5_core_dev *mdev = priv->mdev;
         struct mlx5e_tir *tir;
-        void *in;
+        int err = -ENOMEM;
+        u32 tirn = 0;
         int inlen;
-        int err = 0;
+        void *in;
 
         inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
         in = mlx5_vzalloc(inlen);
         if (!in)
-                return -ENOMEM;
+                goto out;
 
         if (enable_uc_lb)
                 MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -156,13 +158,16 @@ int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
         MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
         list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
-                err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+                tirn = tir->tirn;
+                err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
                 if (err)
                         goto out;
         }
 
 out:
         kvfree(in);
+        if (err)
+                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+
         return err;
 }


@@ -152,12 +152,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
 }
 
 #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) \
-        (NUM_RQ_STATS * priv->params.num_channels * \
-         test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
 #define MLX5E_NUM_SQ_STATS(priv) \
-        (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
-         test_bit(MLX5E_STATE_OPENED, &priv->state))
+        (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
 #define MLX5E_NUM_PFC_COUNTERS(priv) \
         ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
          NUM_PPORT_PER_PRIO_PFC_COUNTERS)
@@ -262,17 +259,17 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                 return;
 
         /* per channel counters */
-        for (i = 0; i < priv->params.num_channels; i++)
+        for (i = 0; i < priv->channels.num; i++)
                 for (j = 0; j < NUM_RQ_STATS; j++)
                         sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                 rq_stats_desc[j].format, i);
 
-        for (tc = 0; tc < priv->params.num_tc; tc++)
-                for (i = 0; i < priv->params.num_channels; i++)
+        for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+                for (i = 0; i < priv->channels.num; i++)
                         for (j = 0; j < NUM_SQ_STATS; j++)
                                 sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                         sq_stats_desc[j].format,
-                                        priv->channeltc_to_txq_map[i][tc]);
+                                        priv->channel_tc2txq[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
@@ -303,6 +300,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
+        struct mlx5e_channels *channels;
         struct mlx5_priv *mlx5_priv;
         int i, j, tc, prio, idx = 0;
         unsigned long pfc_combined;
@@ -313,6 +311,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
         mutex_lock(&priv->state_lock);
         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                 mlx5e_update_stats(priv);
+        channels = &priv->channels;
         mutex_unlock(&priv->state_lock);
 
         for (i = 0; i < NUM_SW_COUNTERS; i++)
@@ -382,16 +381,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                 return;
 
         /* per channel counters */
-        for (i = 0; i < priv->params.num_channels; i++)
+        for (i = 0; i < channels->num; i++)
                 for (j = 0; j < NUM_RQ_STATS; j++)
                         data[idx++] =
-                                MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+                                MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
                                                      rq_stats_desc, j);
 
-        for (tc = 0; tc < priv->params.num_tc; tc++)
-                for (i = 0; i < priv->params.num_channels; i++)
+        for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+                for (i = 0; i < channels->num; i++)
                         for (j = 0; j < NUM_SQ_STATS; j++)
-                                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+                                data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
                                                                    sq_stats_desc, j);
 }
 
@@ -406,8 +405,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
         if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                 return num_wqe;
 
-        stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-        num_strides = 1 << priv->params.mpwqe_log_num_strides;
+        stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+        num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
         wqe_size = stride_size * num_strides;
 
         packets_per_wqe = wqe_size /
@@ -427,8 +426,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
         if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                 return num_packets;
 
-        stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-        num_strides = 1 << priv->params.mpwqe_log_num_strides;
+        stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+        num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
         wqe_size = stride_size * num_strides;
 
         num_packets = (1 << order_base_2(num_packets));
@@ -443,26 +442,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
-        int rq_wq_type = priv->params.rq_wq_type;
+        int rq_wq_type = priv->channels.params.rq_wq_type;
 
         param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
                                                          1 << mlx5_max_log_rq_size(rq_wq_type));
         param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
         param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-                                                     1 << priv->params.log_rq_size);
-        param->tx_pending = 1 << priv->params.log_sq_size;
+                                                     1 << priv->channels.params.log_rq_size);
+        param->tx_pending = 1 << priv->channels.params.log_sq_size;
 }
 
 static int mlx5e_set_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
-        bool was_opened;
-        int rq_wq_type = priv->params.rq_wq_type;
+        int rq_wq_type = priv->channels.params.rq_wq_type;
+        struct mlx5e_channels new_channels = {};
         u32 rx_pending_wqes;
         u32 min_rq_size;
         u32 max_rq_size;
-        u16 min_rx_wqes;
         u8 log_rq_size;
         u8 log_sq_size;
         u32 num_mtts;
@@ -500,7 +498,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
         }
 
         num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
-        if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+        if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
             !MLX5E_VALID_NUM_MTTS(num_mtts)) {
                 netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
                             __func__, param->rx_pending);
@@ -522,26 +520,29 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 
         log_rq_size = order_base_2(rx_pending_wqes);
         log_sq_size = order_base_2(param->tx_pending);
-        min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
-        if (log_rq_size == priv->params.log_rq_size &&
-            log_sq_size == priv->params.log_sq_size &&
-            min_rx_wqes == priv->params.min_rx_wqes)
+        if (log_rq_size == priv->channels.params.log_rq_size &&
+            log_sq_size == priv->channels.params.log_sq_size)
                 return 0;
 
         mutex_lock(&priv->state_lock);
 
-        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-        if (was_opened)
-                mlx5e_close_locked(dev);
+        new_channels.params = priv->channels.params;
+        new_channels.params.log_rq_size = log_rq_size;
+        new_channels.params.log_sq_size = log_sq_size;
 
-        priv->params.log_rq_size = log_rq_size;
-        priv->params.log_sq_size = log_sq_size;
-        priv->params.min_rx_wqes = min_rx_wqes;
+        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                priv->channels.params = new_channels.params;
+                goto unlock;
+        }
 
-        if (was_opened)
-                err = mlx5e_open_locked(dev);
+        err = mlx5e_open_channels(priv, &new_channels);
+        if (err)
+                goto unlock;
+
+        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+unlock:
         mutex_unlock(&priv->state_lock);
 
         return err;
@@ -553,7 +554,7 @@ static void mlx5e_get_channels(struct net_device *dev,
         struct mlx5e_priv *priv = netdev_priv(dev);
 
         ch->max_combined = priv->profile->max_nch(priv->mdev);
-        ch->combined_count = priv->params.num_channels;
+        ch->combined_count = priv->channels.params.num_channels;
 }
 
 static int mlx5e_set_channels(struct net_device *dev,
@@ -561,8 +562,8 @@ static int mlx5e_set_channels(struct net_device *dev,
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         unsigned int count = ch->combined_count;
+        struct mlx5e_channels new_channels = {};
         bool arfs_enabled;
-        bool was_opened;
         int err = 0;
 
         if (!count) {
@@ -571,27 +572,32 @@ static int mlx5e_set_channels(struct net_device *dev,
                 return -EINVAL;
         }
 
-        if (priv->params.num_channels == count)
+        if (priv->channels.params.num_channels == count)
                 return 0;
 
         mutex_lock(&priv->state_lock);
 
-        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-        if (was_opened)
-                mlx5e_close_locked(dev);
+        new_channels.params = priv->channels.params;
+        new_channels.params.num_channels = count;
+        mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
+                                      MLX5E_INDIR_RQT_SIZE, count);
+
+        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                priv->channels.params = new_channels.params;
+                goto out;
+        }
+
+        /* Create fresh channels with new parameters */
+        err = mlx5e_open_channels(priv, &new_channels);
+        if (err)
+                goto out;
 
         arfs_enabled = dev->features & NETIF_F_NTUPLE;
         if (arfs_enabled)
                 mlx5e_arfs_disable(priv);
 
-        priv->params.num_channels = count;
-        mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
-                                      MLX5E_INDIR_RQT_SIZE, count);
-
-        if (was_opened)
-                err = mlx5e_open_locked(dev);
-        if (err)
-                goto out;
+        /* Switch to new channels, set new parameters and close old ones */
+        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
         if (arfs_enabled) {
                 err = mlx5e_arfs_enable(priv);
@@ -614,49 +620,24 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
         if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
                 return -EOPNOTSUPP;
 
-        coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
-        coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
-        coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation.usec;
-        coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
-        coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
+        coal->rx_coalesce_usecs       = priv->channels.params.rx_cq_moderation.usec;
+        coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
+        coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
+        coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
+        coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
 
         return 0;
 }
 
-static int mlx5e_set_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *coal)
+static void
+mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
 {
-        struct mlx5e_priv *priv = netdev_priv(netdev);
         struct mlx5_core_dev *mdev = priv->mdev;
-        struct mlx5e_channel *c;
-        bool restart =
-                !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
-        bool was_opened;
-        int err = 0;
         int tc;
         int i;
 
-        if (!MLX5_CAP_GEN(mdev, cq_moderation))
-                return -EOPNOTSUPP;
-
-        mutex_lock(&priv->state_lock);
-
-        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-        if (was_opened && restart) {
-                mlx5e_close_locked(netdev);
-                priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
-        }
-
-        priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
-        priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
-        priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
-        priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-
-        if (!was_opened || restart)
-                goto out;
-
-        for (i = 0; i < priv->params.num_channels; ++i) {
-                c = priv->channel[i];
+        for (i = 0; i < priv->channels.num; ++i) {
+                struct mlx5e_channel *c = priv->channels.c[i];
 
                 for (tc = 0; tc < c->num_tc; tc++) {
                         mlx5_core_modify_cq_moderation(mdev,
@@ -669,11 +650,50 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
                                                coal->rx_coalesce_usecs,
                                                coal->rx_max_coalesced_frames);
         }
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+                              struct ethtool_coalesce *coal)
+{
+        struct mlx5e_priv *priv = netdev_priv(netdev);
+        struct mlx5_core_dev *mdev = priv->mdev;
+        struct mlx5e_channels new_channels = {};
+        int err = 0;
+        bool reset;
+
+        if (!MLX5_CAP_GEN(mdev, cq_moderation))
+                return -EOPNOTSUPP;
+
+        mutex_lock(&priv->state_lock);
+        new_channels.params = priv->channels.params;
+
+        new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+        new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+        new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+        new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+        new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+
+        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                priv->channels.params = new_channels.params;
+                goto out;
+        }
+        /* we are opened */
+
+        reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+        if (!reset) {
+                mlx5e_set_priv_channels_coalesce(priv, coal);
+                priv->channels.params = new_channels.params;
+                goto out;
+        }
+
+        /* open fresh channels with new coal parameters */
+        err = mlx5e_open_channels(priv, &new_channels);
+        if (err)
+                goto out;
+
+        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
 out:
-        if (was_opened && restart)
-                err = mlx5e_open_locked(netdev);
-
         mutex_unlock(&priv->state_lock);
         return err;
 }
@@ -968,7 +988,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
 
-        return sizeof(priv->params.toeplitz_hash_key);
+        return sizeof(priv->channels.params.toeplitz_hash_key);
 }
 
 static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -982,15 +1002,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
         struct mlx5e_priv *priv = netdev_priv(netdev);
 
         if (indir)
-                memcpy(indir, priv->params.indirection_rqt,
-                       sizeof(priv->params.indirection_rqt));
+                memcpy(indir, priv->channels.params.indirection_rqt,
+                       sizeof(priv->channels.params.indirection_rqt));
 
         if (key)
-                memcpy(key, priv->params.toeplitz_hash_key,
-                       sizeof(priv->params.toeplitz_hash_key));
+                memcpy(key, priv->channels.params.toeplitz_hash_key,
+                       sizeof(priv->channels.params.toeplitz_hash_key));
 
         if (hfunc)
-                *hfunc = priv->params.rss_hfunc;
+                *hfunc = priv->channels.params.rss_hfunc;
 
         return 0;
 }
@@ -1006,7 +1026,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 
         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                 memset(tirc, 0, ctxlen);
-                mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+                mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
                 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
         }
 }
@@ -1030,25 +1050,33 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 
         mutex_lock(&priv->state_lock);
 
-        if (indir) {
-                u32 rqtn = priv->indir_rqt.rqtn;
-
-                memcpy(priv->params.indirection_rqt, indir,
-                       sizeof(priv->params.indirection_rqt));
-                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
-        }
-
         if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
-            hfunc != priv->params.rss_hfunc) {
-                priv->params.rss_hfunc = hfunc;
+            hfunc != priv->channels.params.rss_hfunc) {
+                priv->channels.params.rss_hfunc = hfunc;
                 hash_changed = true;
         }
 
+        if (indir) {
+                memcpy(priv->channels.params.indirection_rqt, indir,
+                       sizeof(priv->channels.params.indirection_rqt));
+
+                if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                        u32 rqtn = priv->indir_rqt.rqtn;
+                        struct mlx5e_redirect_rqt_param rrp = {
+                                .is_rss = true,
+                                .rss.hfunc = priv->channels.params.rss_hfunc,
+                                .rss.channels = &priv->channels
+                        };
+
+                        mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+                }
+        }
+
         if (key) {
-                memcpy(priv->params.toeplitz_hash_key, key,
-                       sizeof(priv->params.toeplitz_hash_key));
+                memcpy(priv->channels.params.toeplitz_hash_key, key,
+                       sizeof(priv->channels.params.toeplitz_hash_key));
                 hash_changed = hash_changed ||
-                               priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+                               priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
         }
 
         if (hash_changed)
@@ -1069,7 +1097,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
 
         switch (info->cmd) {
         case ETHTOOL_GRXRINGS:
-                info->data = priv->params.num_channels;
+                info->data = priv->channels.params.num_channels;
                 break;
         case ETHTOOL_GRXCLSRLCNT:
                 info->rule_cnt = priv->fs.ethtool.tot_num_rules;
@@ -1097,7 +1125,7 @@ static int mlx5e_get_tunable(struct net_device *dev,
 
         switch (tuna->id) {
         case ETHTOOL_TX_COPYBREAK:
-                *(u32 *)data = priv->params.tx_max_inline;
+                *(u32 *)data = priv->channels.params.tx_max_inline;
                 break;
         default:
                 err = -EINVAL;
@@ -1113,9 +1141,11 @@ static int mlx5e_set_tunable(struct net_device *dev,
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         struct mlx5_core_dev *mdev = priv->mdev;
-        bool was_opened;
-        u32 val;
+        struct mlx5e_channels new_channels = {};
         int err = 0;
+        u32 val;
+
+        mutex_lock(&priv->state_lock);
 
         switch (tuna->id) {
         case ETHTOOL_TX_COPYBREAK:
@@ -1125,24 +1155,26 @@ static int mlx5e_set_tunable(struct net_device *dev,
                         break;
                 }
 
-                mutex_lock(&priv->state_lock);
+                new_channels.params = priv->channels.params;
+                new_channels.params.tx_max_inline = val;
 
-                was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-                if (was_opened)
-                        mlx5e_close_locked(dev);
+                if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                        priv->channels.params = new_channels.params;
+                        break;
+                }
 
-                priv->params.tx_max_inline = val;
+                err = mlx5e_open_channels(priv, &new_channels);
+                if (err)
+                        break;
+                mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
-                if (was_opened)
-                        err = mlx5e_open_locked(dev);
-
-                mutex_unlock(&priv->state_lock);
                 break;
         default:
                 err = -EINVAL;
                 break;
         }
+        mutex_unlock(&priv->state_lock);
 
         return err;
 }
@@ -1442,15 +1474,15 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
         struct mlx5_core_dev *mdev = priv->mdev;
+        struct mlx5e_channels new_channels = {};
         bool rx_mode_changed;
         u8 rx_cq_period_mode;
         int err = 0;
-        bool reset;
 
         rx_cq_period_mode = enable ?
                 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-        rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+        rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
 
         if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
             !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
@@ -1459,16 +1491,51 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
         if (!rx_mode_changed)
                 return 0;
 
-        reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-        if (reset)
-                mlx5e_close_locked(netdev);
+        new_channels.params = priv->channels.params;
+        mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
 
-        mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                priv->channels.params = new_channels.params;
+                return 0;
+        }
 
-        if (reset)
-                err = mlx5e_open_locked(netdev);
-
-        return err;
+        err = mlx5e_open_channels(priv, &new_channels);
+        if (err)
+                return err;
+
+        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+        return 0;
+}
+
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+{
+        bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
+        struct mlx5e_channels new_channels = {};
+        int err = 0;
+
+        if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+                return new_val ? -EOPNOTSUPP : 0;
+
+        if (curr_val == new_val)
+                return 0;
+
+        new_channels.params = priv->channels.params;
+        MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+
+        mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
+                                 new_channels.params.rq_wq_type);
+
+        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                priv->channels.params = new_channels.params;
+                return 0;
+        }
+
+        err = mlx5e_open_channels(priv, &new_channels);
+        if (err)
+                return err;
+
+        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+        return 0;
 }
 
 static int set_pflag_rx_cqe_compress(struct net_device *netdev,
@@ -1486,8 +1553,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
         }
 
         mlx5e_modify_rx_cqe_compression_locked(priv, enable);
-        priv->params.rx_cqe_compress_def = enable;
-        mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
+        priv->channels.params.rx_cqe_compress_def = enable;
 
         return 0;
 }
@@ -1499,7 +1565,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
         bool enable = !!(wanted_flags & flag);
-        u32 changes = wanted_flags ^ priv->params.pflags;
+        u32 changes = wanted_flags ^ priv->channels.params.pflags;
         int err;
 
         if (!(changes & flag))
@@ -1512,7 +1578,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
                 return err;
         }
 
-        MLX5E_SET_PFLAG(priv, flag, enable);
+        MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
         return 0;
 }
@@ -1541,7 +1607,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
 
-        return priv->params.pflags;
+        return priv->channels.params.pflags;
 }
 
 static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
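Note that every ethtool path above passes NULL as the hw_modify argument of
mlx5e_switch_priv_channels(). As a sketch of the non-NULL case (an assumption
here, since the one diff suppressed below is where it would be used): the
callback runs after the old channels are deactivated and before the new ones
are activated, so HW state is never reprogrammed under active traffic:

        /* Hypothetical callback; name and body are illustrative only */
        static int mlx5e_example_hw_modify(struct mlx5e_priv *priv)
        {
                /* reprogram port/HW state while no channels are active */
                return 0;
        }

        err = mlx5e_open_channels(priv, &new_channels);
        if (err)
                goto out;
        mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_example_hw_modify);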


@@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv,
         if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
                 return -EINVAL;
 
-        if (fs->ring_cookie >= priv->params.num_channels &&
+        if (fs->ring_cookie >= priv->channels.params.num_channels &&
             fs->ring_cookie != RX_CLS_FLOW_DISC)
                 return -EINVAL;

(diff for one file suppressed because it is too large)


@@ -102,14 +102,16 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
         int i, j;
 
         memset(s, 0, sizeof(*s));
-        for (i = 0; i < priv->params.num_channels; i++) {
-                rq_stats = &priv->channel[i]->rq.stats;
+        for (i = 0; i < priv->channels.num; i++) {
+                struct mlx5e_channel *c = priv->channels.c[i];
+
+                rq_stats = &c->rq.stats;
 
                 s->rx_packets += rq_stats->packets;
                 s->rx_bytes   += rq_stats->bytes;
 
-                for (j = 0; j < priv->params.num_tc; j++) {
-                        sq_stats = &priv->channel[i]->sq[j].stats;
+                for (j = 0; j < priv->channels.params.num_tc; j++) {
+                        sq_stats = &c->sq[j].stats;
 
                         s->tx_packets += sq_stats->packets;
                         s->tx_bytes   += sq_stats->bytes;
@@ -187,22 +189,26 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
         struct mlx5_eswitch_rep *rep = priv->ppriv;
         struct mlx5e_channel *c;
-        int n, tc, err, num_sqs = 0;
+        int n, tc, num_sqs = 0;
+        int err = -ENOMEM;
         u16 *sqs;
 
-        sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
         if (!sqs)
-                return -ENOMEM;
+                goto out;
 
-        for (n = 0; n < priv->params.num_channels; n++) {
-                c = priv->channel[n];
+        for (n = 0; n < priv->channels.num; n++) {
+                c = priv->channels.c[n];
                 for (tc = 0; tc < c->num_tc; tc++)
                         sqs[num_sqs++] = c->sq[tc].sqn;
         }
 
         err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
         kfree(sqs);
+
+out:
+        if (err)
+                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
         return err;
 }
@@ -397,42 +403,23 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
         .ndo_get_offload_stats = mlx5e_get_offload_stats,
 };
 
-static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
-                                        struct net_device *netdev,
-                                        const struct mlx5e_profile *profile,
-                                        void *ppriv)
+static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+                                   struct mlx5e_params *params)
 {
-        struct mlx5e_priv *priv = netdev_priv(netdev);
         u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                          MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                          MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-        priv->params.log_sq_size =
-                MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-        priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
-        priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+        params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+        params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
+        params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
-        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
-                                                    BIT(priv->params.log_rq_size));
+        params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
-        priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-        mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
-        priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
-        priv->params.num_tc = 1;
-
-        priv->params.lro_wqe_sz =
-                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-
-        priv->mdev = mdev;
-        priv->netdev = netdev;
-        priv->params.num_channels = profile->max_nch(mdev);
-        priv->profile = profile;
-        priv->ppriv = ppriv;
-
-        mutex_init(&priv->state_lock);
-
-        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+        params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+        params->num_tc        = 1;
+        params->lro_wqe_sz    = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -458,7 +445,19 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
                            const struct mlx5e_profile *profile,
                            void *ppriv)
 {
-        mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+        struct mlx5e_priv *priv = netdev_priv(netdev);
+
+        priv->mdev    = mdev;
+        priv->netdev  = netdev;
+        priv->profile = profile;
+        priv->ppriv   = ppriv;
+
+        mutex_init(&priv->state_lock);
+
+        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+        priv->channels.params.num_channels = profile->max_nch(mdev);
+        mlx5e_build_rep_params(mdev, &priv->channels.params);
         mlx5e_build_rep_netdev(netdev);
 }
@@ -503,7 +502,7 @@ err_del_flow_rule:
 err_destroy_direct_tirs:
         mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
-        for (i = 0; i < priv->params.num_channels; i++)
+        for (i = 0; i < priv->channels.params.num_channels; i++)
                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
         return err;
 }
@@ -516,7 +515,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
         mlx5e_tc_cleanup(priv);
         mlx5_del_flow_rules(rep->vport_rx_rule);
         mlx5e_destroy_direct_tirs(priv);
-        for (i = 0; i < priv->params.num_channels; i++)
+        for (i = 0; i < priv->channels.params.num_channels; i++)
                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
 }


@@ -156,28 +156,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
         return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
 }
 
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
-{
-        bool was_opened;
-
-        if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
-                return;
-
-        if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
-                return;
-
-        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-        if (was_opened)
-                mlx5e_close_locked(priv->netdev);
-
-        MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
-        mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
-
-        if (was_opened)
-                mlx5e_open_locked(priv->netdev);
-}
-
 #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
 
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,


@@ -293,7 +293,7 @@ void mlx5e_rx_am_work(struct work_struct *work)
         struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
         struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
 
-        mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+        mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
                                        cur_profile.usec, cur_profile.pkts);
 
         am->state = MLX5E_AM_START_MEASURE;


@@ -236,12 +236,9 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 {
         int err = 0;
 
-        err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
-        if (err) {
-                netdev_err(priv->netdev,
-                           "\tFailed to enable UC loopback err(%d)\n", err);
+        err = mlx5e_refresh_tirs(priv, true);
+        if (err)
                 return err;
-        }
 
         lbtp->loopback_ok = false;
         init_completion(&lbtp->comp);
@@ -258,7 +255,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
                                         struct mlx5e_lbt_priv *lbtp)
 {
         dev_remove_pack(&lbtp->pt);
-        mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+        mlx5e_refresh_tirs(priv, false);
 }
 
 #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))


@@ -88,6 +88,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         int channel_ix = fallback(dev, skb);
+        u16 num_channels;
         int up = 0;
 
         if (!netdev_get_num_tc(dev))
@@ -99,11 +100,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
         /* channel_ix can be larger than num_channels since
          * dev->num_real_tx_queues = num_channels * num_tc
          */
-        if (channel_ix >= priv->params.num_channels)
-                channel_ix = reciprocal_scale(channel_ix,
-                                              priv->params.num_channels);
+        num_channels = priv->channels.params.num_channels;
+        if (channel_ix >= num_channels)
+                channel_ix = reciprocal_scale(channel_ix, num_channels);
 
-        return priv->channeltc_to_txq_map[channel_ix][up];
+        return priv->channel_tc2txq[channel_ix][up];
 }
 
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
@@ -339,7 +340,7 @@ dma_unmap_wqe_err:
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
-        struct mlx5e_txqsq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+        struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
 
         return mlx5e_sq_xmit(sq, skb);
 }


@@ -164,8 +164,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
 {
         struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
         struct mlx5e_channel *c = cq->channel;
-        struct mlx5e_priv *priv = c->priv;
-        struct net_device *netdev = priv->netdev;
+        struct net_device *netdev = c->netdev;
 
         netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                    __func__, mcq->cqn, event);