mirror of https://github.com/torvalds/linux.git (synced 2024-12-02 09:01:34 +00:00)
mlx5-updates-2019-09-05

commit 22c63d9c94

Merge tag 'mlx5-updates-2019-09-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-09-05

1) Allover mlx5 cleanups

2) Added port congestion counters to ethtool stats:
   Add 3 counters per priority to ethtool using PPCNT:

   2.1) rx_prio[p]_buf_discard - the number of packets discarded by the
        device due to lack of per-host receive buffers
   2.2) rx_prio[p]_cong_discard - the number of packets discarded by the
        device due to per-host congestion
   2.3) rx_prio[p]_marked - the number of packets ECN-marked by the
        device due to per-host congestion
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
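For context, each of the three counters is instantiated once per priority, so after this series `ethtool -S` lists names like rx_prio0_buf_discard through rx_prio7_marked. A minimal userspace sketch of how those names expand, assuming only the format strings added in en_stats.c below and that NUM_PRIO mirrors the driver's NUM_PPORT_PRIO of 8 (illustrative only, not driver code):

#include <stdio.h>

#define NUM_PRIO 8	/* assumed to mirror the driver's NUM_PPORT_PRIO */

static const char *formats[] = {
	"rx_prio%d_buf_discard",
	"rx_prio%d_cong_discard",
	"rx_prio%d_marked",
};

int main(void)
{
	for (int prio = 0; prio < NUM_PRIO; prio++) {
		for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
			char name[32];

			snprintf(name, sizeof(name), formats[i], prio);
			puts(name);	/* rx_prio0_buf_discard, ... */
		}
	}
	return 0;
}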
@@ -10,6 +10,7 @@ config MLX5_CORE
 	imply PTP_1588_CLOCK
 	imply VXLAN
 	imply MLXFW
+	imply PCI_HYPERV_INTERFACE
 	default n
 	---help---
 	  Core driver for low level functionality of the ConnectX-4 and
@@ -32,7 +33,6 @@ config MLX5_FPGA
 config MLX5_CORE_EN
 	bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
 	depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
-	depends on IPV6=y || IPV6=n || MLX5_CORE=m
 	select PAGE_POOL
 	select DIMLIB
 	default n
@@ -133,7 +133,7 @@ static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
 
 	else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
 		NL_SET_ERR_MSG_MOD(extack,
-				   "Software managed steering is not supported when eswitch offlaods enabled.");
+				   "Software managed steering is not supported when eswitch offloads enabled.");
 		err = -EOPNOTSUPP;
 	}
 } else {
@@ -30,22 +30,21 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
 	}
 }
 
-static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, u64 *data,
+static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 				     int buf_len)
 {
 	int ch, i = 0;
 
 	for (ch = 0; ch < priv->max_nch; ch++) {
-		u64 *buf = data + i;
+		void *buf = data + i;
 
 		if (WARN_ON_ONCE(buf +
 				 sizeof(struct mlx5e_hv_vhca_per_ring_stats) >
 				 data + buf_len))
 			return;
 
-		mlx5e_hv_vhca_fill_ring_stats(priv, ch,
-					      (struct mlx5e_hv_vhca_per_ring_stats *)buf);
-		i += sizeof(struct mlx5e_hv_vhca_per_ring_stats) / sizeof(u64);
+		mlx5e_hv_vhca_fill_ring_stats(priv, ch, buf);
+		i += sizeof(struct mlx5e_hv_vhca_per_ring_stats);
 	}
 }
 
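The hunk above switches mlx5e_hv_vhca_fill_stats() from u64-element indexing to byte offsets, so the WARN_ON_ONCE bounds check now compares like units (buf_len is in bytes). A self-contained sketch of that byte-stepping pattern, with a stand-in record type rather than the driver's struct:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ring_stats {	/* stand-in for mlx5e_hv_vhca_per_ring_stats */
	uint64_t rx_packets;
	uint64_t tx_packets;
};

static void fill_stats(void *data, int buf_len, int nch)
{
	int ch, i = 0;

	for (ch = 0; ch < nch; ch++) {
		void *buf = (char *)data + i;

		/* bytes remaining must fit one whole record */
		if ((char *)buf + sizeof(struct ring_stats) >
		    (char *)data + buf_len)
			return;

		memset(buf, 0, sizeof(struct ring_stats));	/* fill here */
		i += sizeof(struct ring_stats);	/* advance in bytes, not u64s */
	}
}

int main(void)
{
	char buf[3 * sizeof(struct ring_stats)];

	fill_stats(buf, sizeof(buf), 4);	/* the 4th channel is rejected */
	printf("record size: %zu bytes\n", sizeof(struct ring_stats));
	return 0;
}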
@@ -256,8 +256,7 @@ struct mlx5e_dump_wqe {
 };
 
 static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-		    skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
 {
 	struct mlx5_wqe_ctrl_seg *cseg;
 	struct mlx5_wqe_data_seg *dseg;
@@ -371,8 +370,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
 	for (i = 0; i < info.nr_frags; i++)
-		if (tx_post_resync_dump(sq, skb, info.frags[i],
-					priv_tx->tisn, !i))
+		if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
 			goto err_out;
 
 	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
@@ -1315,7 +1315,6 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
 	return 0;
 
 err_free_txqsq:
-	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	mlx5e_free_txqsq(sq);
 
 	return err;
@@ -1403,7 +1402,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	return 0;
 
 err_free_icosq:
-	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	mlx5e_free_icosq(sq);
 
 	return err;
@@ -38,6 +38,7 @@
 #include <net/netevent.h>
 #include <net/arp.h>
 #include <net/devlink.h>
+#include <net/ipv6_stubs.h>
 
 #include "eswitch.h"
 #include "en.h"
@@ -499,16 +500,18 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 	mlx5e_sqs2vport_stop(esw, rep);
 }
 
+static unsigned long mlx5e_rep_ipv6_interval(void)
+{
+	if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
+		return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
+
+	return ~0UL;
+}
+
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 {
-#if IS_ENABLED(CONFIG_IPV6)
-	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
-						DELAY_PROBE_TIME);
-#else
-	unsigned long ipv6_interval = ~0UL;
-#endif
-	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
-						DELAY_PROBE_TIME);
+	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+	unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
 	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
@@ -917,7 +920,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	case NETEVENT_NEIGH_UPDATE:
 		n = ptr;
 #if IS_ENABLED(CONFIG_IPV6)
-		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
+		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
 #else
 		if (n->tbl != &arp_tbl)
 #endif
@@ -944,7 +947,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 		 * done per device delay prob time parameter.
 		 */
#if IS_ENABLED(CONFIG_IPV6)
-		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
+		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
 #else
 		if (!p->dev || p->tbl != &arp_tbl)
 #endif
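The en_rep.c hunks above replace direct references to the IPv6 neighbour table (nd_tbl) with ipv6_stub->nd_tbl, which is what lets the Kconfig hunk drop the IPV6=y || IPV6=n || MLX5_CORE=m dependency: the caller no longer links against an IPv6 symbol directly, only tests a pointer that the IPv6 code fills in. A self-contained sketch of that stub-pointer idea (names are stand-ins, not the kernel's ipv6_stub API):

#include <stdio.h>

struct ipv6_ops {
	const char *(*table_name)(void);
};

/* NULL until the optional IPv6 part registers itself */
static const struct ipv6_ops *ipv6_stub;

static const char *nd_tbl_name(void) { return "nd_tbl"; }
static const struct ipv6_ops real_ipv6 = { .table_name = nd_tbl_name };

static unsigned long probe_interval(void)
{
	if (ipv6_stub)		/* like the IS_ENABLED() + stub check above */
		return 5000;	/* stand-in for DELAY_PROBE_TIME */
	return ~0UL;		/* effectively "never" */
}

int main(void)
{
	printf("before: %lu\n", probe_interval());
	ipv6_stub = &real_ipv6;	/* "module loaded" */
	printf("after: %lu (%s)\n", probe_interval(),
	       ipv6_stub->table_name());
	return 0;
}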
@@ -183,7 +183,6 @@ struct mlx5e_rep_sq {
 	struct list_head list;
 };
 
-void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
@@ -981,6 +981,147 @@ static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
 }
 
+#define PPORT_PER_TC_PRIO_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
+
+static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
+	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
+};
+
+#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)
+
+#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
+
+static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
+	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
+	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
+};
+
+#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
+	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
+
+static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return 0;
+
+	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
+							  u8 *data, int idx)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int i, prio;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return idx;
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_tc_prio_stats_desc[i].format, prio);
+		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_tc_congest_prio_stats_desc[i].format, prio);
+	}
+
+	return idx;
+}
+
+static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
+							u64 *data, int idx)
+{
+	struct mlx5e_pport_stats *pport = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int i, prio;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return idx;
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
+			data[idx++] =
+				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
+						    pport_per_tc_prio_stats_desc, i);
+		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
+			data[idx++] =
+				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
+						    pport_per_tc_congest_prio_stats_desc, i);
+	}
+
+	return idx;
+}
+
+static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+	int prio;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return;
+
+	MLX5_SET(ppcnt_reg, in, pnat, 2);
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		out = pstats->per_tc_prio_counters[prio];
+		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+	}
+}
+
+static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return 0;
+
+	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	void *out;
+	int prio;
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return;
+
+	MLX5_SET(ppcnt_reg, in, pnat, 2);
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		out = pstats->per_tc_congest_prio_counters[prio];
+		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+	}
+}
+
+static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
+{
+	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
+	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
+}
+
+static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_grp_per_tc_prio_update_stats(priv);
+	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
+}
+
 #define PPORT_PER_PRIO_OFF(c) \
 	MLX5_BYTE_OFF(ppcnt_reg, \
 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
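All of the offsets above point at the _high half of each counter because PPCNT counters are 64-bit values laid out as big-endian high/low 32-bit pairs (see the mlx5_ifc.h layout structs later in this diff); an MLX5E_READ_CTR64_BE-style reader fetches the full 64-bit big-endian word starting at that offset. A plain-C sketch of that readout, assuming that layout:

#include <stdint.h>
#include <stdio.h>

static uint64_t read_ctr64_be(const void *base, size_t byte_off)
{
	const uint8_t *p = (const uint8_t *)base + byte_off;
	uint64_t v = 0;

	/* big-endian: the _high word comes first, then _low */
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* e.g. wred_discard_high = 0x00000001, wred_discard_low = 0x00000002 */
	uint8_t reg[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };

	printf("0x%llx\n", (unsigned long long)read_ctr64_be(reg, 0));
	return 0;	/* prints 0x100000002 */
}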
@@ -1610,7 +1751,13 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
 		.get_num_stats = mlx5e_grp_channels_get_num_stats,
 		.fill_strings = mlx5e_grp_channels_fill_strings,
 		.fill_stats = mlx5e_grp_channels_fill_stats,
-	}
+	},
+	{
+		.get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
+		.fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
+		.fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
+		.update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
+	},
 };
 
 const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
@@ -207,6 +207,8 @@ struct mlx5e_pport_stats {
 	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
 };
 
 #define PCIE_PERF_GET(pcie_stats, c) \
@@ -988,10 +988,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 							    &flow_act, dest, dest_ix);
 	mutex_unlock(&priv->fs.tc.t_lock);
 
-	if (IS_ERR(flow->rule[0]))
-		return PTR_ERR(flow->rule[0]);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(flow->rule[0]);
 }
 
 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -1492,7 +1489,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 		tbl = &arp_tbl;
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (m_neigh->family == AF_INET6)
-		tbl = &nd_tbl;
+		tbl = ipv6_stub->nd_tbl;
 #endif
 	else
 		return;
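The first en_tc.c hunk collapses an IS_ERR()/PTR_ERR() pair into PTR_ERR_OR_ZERO(), which yields the errno encoded in an error pointer, or 0 for a valid one. A self-contained mock of those helpers (simplified stand-ins for the kernel's err.h, not the real definitions):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

/* one expression replaces the removed if/return pair */
static long PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

int main(void)
{
	int x = 0;
	void *ok = &x;
	void *bad = (void *)-12;	/* -ENOMEM encoded in the pointer */

	printf("%ld %ld\n", PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));
	return 0;	/* prints: 0 -12 */
}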
@@ -1651,7 +1651,7 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
 	if (err)
 		return err;
 
-	mlx5_flow_namespace_set_peer(peer_ns, ns);
+	err = mlx5_flow_namespace_set_peer(peer_ns, ns);
 	if (err) {
 		mlx5_flow_namespace_set_peer(ns, NULL);
 		return err;
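The eswitch hunk stops dropping the return value of the second mlx5_flow_namespace_set_peer() call and unwinds the first call when the second one fails. A sketch of that pair-and-rollback shape (function names are stand-ins, not the driver's API):

#include <stdio.h>

static int set_peer(const char *ns, const char *peer)
{
	if (!peer)	/* clearing always succeeds in this mock */
		return 0;
	printf("%s.peer = %s\n", ns, peer);
	return 0;	/* flip to nonzero to exercise the rollback */
}

static int set_ns_peer_pair(const char *ns, const char *peer_ns)
{
	int err;

	err = set_peer(ns, peer_ns);
	if (err)
		return err;

	err = set_peer(peer_ns, ns);	/* previously this result was ignored */
	if (err) {
		set_peer(ns, NULL);	/* roll back the first half */
		return err;
	}
	return 0;
}

int main(void)
{
	return set_ns_peer_pair("esw0", "esw1");
}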
@@ -66,27 +66,27 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 	dmn->uar = mlx5_get_uars_page(dmn->mdev);
 	if (!dmn->uar) {
 		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+		ret = -ENOMEM;
 		goto clean_pd;
 	}
 
 	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
 	if (!dmn->ste_icm_pool) {
-		mlx5dr_err(dmn, "Couldn't get icm memory for %s\n",
-			   dev_name(dmn->mdev->device));
+		mlx5dr_err(dmn, "Couldn't get icm memory\n");
+		ret = -ENOMEM;
 		goto clean_uar;
 	}
 
 	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
 	if (!dmn->action_icm_pool) {
-		mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n",
-			   dev_name(dmn->mdev->device));
+		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
+		ret = -ENOMEM;
 		goto free_ste_icm_pool;
 	}
 
 	ret = mlx5dr_send_ring_alloc(dmn);
 	if (ret) {
-		mlx5dr_err(dmn, "Couldn't create send-ring for %s\n",
-			   dev_name(dmn->mdev->device));
+		mlx5dr_err(dmn, "Couldn't create send-ring\n");
 		goto free_action_icm_pool;
 	}
 
@@ -309,16 +309,14 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 		    dmn->info.caps.log_icm_size);
 
 	if (!dmn->info.supp_sw_steering) {
-		mlx5dr_err(dmn, "SW steering not supported for %s\n",
-			   dev_name(mdev->device));
+		mlx5dr_err(dmn, "SW steering is not supported\n");
 		goto uninit_caps;
 	}
 
 	/* Allocate resources */
 	ret = dr_domain_init_resources(dmn);
 	if (ret) {
-		mlx5dr_err(dmn, "Failed init domain resources for %s\n",
-			   dev_name(mdev->device));
+		mlx5dr_err(dmn, "Failed init domain resources\n");
 		goto uninit_caps;
 	}
 
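Besides shortening the error strings (mlx5dr_err already identifies the device), the first dr_domain.c hunk sets ret = -ENOMEM before each goto, so those failure paths cannot return a stale or uninitialized value. The usual kernel unwind shape, sketched in plain C with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

static int init_resources(void)
{
	void *pd, *uar, *pool;
	int ret;

	pd = malloc(16);
	if (!pd)
		return -12;	/* -ENOMEM */

	uar = malloc(16);
	if (!uar) {
		ret = -12;	/* set ret before jumping, as the fix above does */
		goto clean_pd;
	}

	pool = malloc(16);
	if (!pool) {
		ret = -12;
		goto clean_uar;
	}

	/* success path: release everything so the demo stays leak-free */
	free(pool);
	free(uar);
	free(pd);
	return 0;

clean_uar:
	free(uar);
clean_pd:
	free(pd);
	return ret;
}

int main(void)
{
	printf("init: %d\n", init_resources());
	return 0;
}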
@@ -899,7 +899,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 		goto clean_qp;
 	}
 
-	memset(dmn->send_ring->buf, 0, size);
 	dmn->send_ring->buf_size = size;
 
 	dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
@@ -1316,6 +1316,7 @@ enum {
 	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
 	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
+	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
 	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
@@ -1196,7 +1196,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         rts2rts_qp_counters_set_id[0x1];
 	u8         reserved_at_16a[0x2];
 	u8         vnic_env_int_rq_oob[0x1];
-	u8         reserved_at_16d[0x2];
+	u8         sbcam_reg[0x1];
+	u8         reserved_at_16e[0x1];
 	u8         qcam_reg[0x1];
 	u8         gid_table_size[0x10];
 
@@ -1960,12 +1961,28 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
 	u8         port_xmit_wait[0x20];
 };
 
-struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits {
 	u8         transmit_queue_high[0x20];
 
 	u8         transmit_queue_low[0x20];
 
-	u8         reserved_at_40[0x780];
+	u8         no_buffer_discard_uc_high[0x20];
+
+	u8         no_buffer_discard_uc_low[0x20];
+
+	u8         reserved_at_80[0x740];
+};
+
+struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits {
+	u8         wred_discard_high[0x20];
+
+	u8         wred_discard_low[0x20];
+
+	u8         ecn_marked_tc_high[0x20];
+
+	u8         ecn_marked_tc_low[0x20];
+
+	u8         reserved_at_80[0x740];
 };
 
 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -3642,7 +3659,8 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
 	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
 	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
 	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
-	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
 	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
 	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
 	struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
@@ -9422,7 +9440,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
 	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
 	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
-	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
 	struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
 	struct mlx5_ifc_pamp_reg_bits pamp_reg;
 	struct mlx5_ifc_paos_reg_bits paos_reg;