Merge branch 'hns3-next'
Huazhong Tan says:

====================
net: hns3: updates for -next

This patchset adds support for tc mqprio offload, hw tc offload of tc flower, and adaptation for max RSS size changes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This merge is contained in commit a7105e3472, touching drivers/net/ethernet/hisilicon/hns3.
@@ -29,7 +29,9 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
+#include <linux/pkt_sched.h>
 #include <linux/types.h>
+#include <net/pkt_cls.h>
 
 #define HNAE3_MOD_VERSION "1.0"
 
@@ -457,6 +459,12 @@ struct hnae3_ae_dev {
  *   Configure the default MAC for specified VF
  * get_module_eeprom
  *   Get the optical module eeprom info.
+ * add_cls_flower
+ *   Add clsflower rule
+ * del_cls_flower
+ *   Delete clsflower rule
+ * cls_flower_active
+ *   Check if any cls flower rules exist
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -634,6 +642,11 @@ struct hnae3_ae_ops {
 	int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
 				 u32 len, u8 *data);
 	bool (*get_cmdq_stat)(struct hnae3_handle *handle);
+	int (*add_cls_flower)(struct hnae3_handle *handle,
+			      struct flow_cls_offload *cls_flower, int tc);
+	int (*del_cls_flower)(struct hnae3_handle *handle,
+			      struct flow_cls_offload *cls_flower);
+	bool (*cls_flower_active)(struct hnae3_handle *handle);
 };
 
 struct hnae3_dcb_ops {
@@ -647,7 +660,8 @@ struct hnae3_dcb_ops {
 	u8 (*getdcbx)(struct hnae3_handle *);
 	u8 (*setdcbx)(struct hnae3_handle *, u8);
 
-	int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
+	int (*setup_tc)(struct hnae3_handle *handle,
+			struct tc_mqprio_qopt_offload *mqprio_qopt);
 };
 
 struct hnae3_ae_algo {
@@ -659,15 +673,17 @@ struct hnae3_ae_algo {
 #define HNAE3_INT_NAME_LEN        32
 #define HNAE3_ITR_COUNTDOWN_START 100
 
-struct hnae3_tc_info {
-	u16	tqp_offset;	/* TQP offset from base TQP */
-	u16	tqp_count;	/* Total TQPs */
-	u8	tc;		/* TC index */
-	bool	enable;		/* If this TC is enabled or not */
-};
-
 #define HNAE3_MAX_TC		8
 #define HNAE3_MAX_USER_PRIO	8
+struct hnae3_tc_info {
+	u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
+	u16 tqp_count[HNAE3_MAX_TC];
+	u16 tqp_offset[HNAE3_MAX_TC];
+	unsigned long tc_en; /* bitmap of enabled TCs */
+	u8 num_tc; /* Total number of enabled TCs */
+	bool mqprio_active;
+};
+
 struct hnae3_knic_private_info {
 	struct net_device *netdev; /* Set by KNIC client when init instance */
 	u16 rss_size;		   /* Allocated RSS queues */
@@ -676,9 +692,7 @@ struct hnae3_knic_private_info {
 	u16 num_tx_desc;
 	u16 num_rx_desc;
 
-	u8 num_tc;		   /* Total number of enabled TCs */
-	u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
-	struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
+	struct hnae3_tc_info tc_info;
 
 	u16 num_tqps;		  /* total number of TQPs in this handle */
 	struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
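The reshaped hnae3_tc_info above replaces the old per-TC array (one struct per TC with its own enable flag) with parallel arrays indexed by TC plus a tc_en bitmap. A minimal userspace sketch of walking the enabled TCs under the new layout — the field names follow the struct above, the stub values are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define HNAE3_MAX_TC		8
#define HNAE3_MAX_USER_PRIO	8

struct hnae3_tc_info {
	uint8_t prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
	uint16_t tqp_count[HNAE3_MAX_TC];
	uint16_t tqp_offset[HNAE3_MAX_TC];
	unsigned long tc_en;                  /* bitmap of enabled TCs */
	uint8_t num_tc;
	int mqprio_active;
};

int main(void)
{
	/* illustrative layout: three TCs with 4, 4 and 8 queues */
	struct hnae3_tc_info ti = {
		.tqp_count  = { 4, 4, 8 },
		.tqp_offset = { 0, 4, 8 },
		.tc_en = 0x7,	/* TCs 0..2 enabled */
		.num_tc = 3,
	};

	for (int i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(ti.tc_en & (1UL << i)))	/* test_bit() equivalent */
			continue;
		printf("TC%d: queues %u..%u\n", i, ti.tqp_offset[i],
		       ti.tqp_offset[i] + ti.tqp_count[i] - 1);
	}
	return 0;
}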
@@ -385,7 +385,8 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
 	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
 	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
 	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
-	dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
+		 kinfo->tc_info.num_tc);
 	dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
 	dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
 }
@@ -323,13 +323,14 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
-	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
+	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+	unsigned int queue_size = kinfo->num_tqps;
 	int i, ret;
 
-	if (kinfo->num_tc <= 1) {
+	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
 		netdev_reset_tc(netdev);
 	} else {
-		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
 		if (ret) {
 			netdev_err(netdev,
 				   "netdev_set_num_tc fail, ret=%d!\n", ret);
@@ -337,13 +338,11 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 		}
 
 		for (i = 0; i < HNAE3_MAX_TC; i++) {
-			if (!kinfo->tc_info[i].enable)
+			if (!test_bit(i, &tc_info->tc_en))
 				continue;
 
-			netdev_set_tc_queue(netdev,
-					    kinfo->tc_info[i].tc,
-					    kinfo->tc_info[i].tqp_count,
-					    kinfo->tc_info[i].tqp_offset);
+			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
+					    tc_info->tqp_offset[i]);
 		}
 	}
 
@@ -369,7 +368,7 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 	u16 alloc_tqps, max_rss_size, rss_size;
 
 	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
-	rss_size = alloc_tqps / h->kinfo.num_tc;
+	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;
 
 	return min_t(u16, rss_size, max_rss_size);
 }
@@ -508,7 +507,7 @@ static int hns3_nic_net_open(struct net_device *netdev)
 
 	kinfo = &h->kinfo;
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
-		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
+		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);
 
 	if (h->ae_algo->ops->set_timer_task)
 		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
@@ -1669,6 +1668,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
 		h->ae_algo->ops->enable_fd(h, enable);
 	}
 
+	if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+	    h->ae_algo->ops->cls_flower_active(h)) {
+		netdev_err(netdev,
+			   "there are offloaded TC filters active, cannot disable HW TC offload");
+		return -EINVAL;
+	}
+
 	netdev->features = features;
 	return 0;
 }
@@ -1794,7 +1800,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
 {
 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
-	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
 	struct hnae3_knic_private_info *kinfo;
 	u8 tc = mqprio_qopt->qopt.num_tc;
 	u16 mode = mqprio_qopt->mode;
@@ -1817,16 +1822,70 @@
 	netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
 
 	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
-		kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
+		kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
 }
 
+static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
+				    struct flow_cls_offload *flow)
+{
+	int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
+	struct hnae3_handle *h = hns3_get_handle(priv->netdev);
+
+	switch (flow->command) {
+	case FLOW_CLS_REPLACE:
+		if (h->ae_algo->ops->add_cls_flower)
+			return h->ae_algo->ops->add_cls_flower(h, flow, tc);
+		break;
+	case FLOW_CLS_DESTROY:
+		if (h->ae_algo->ops->del_cls_flower)
+			return h->ae_algo->ops->del_cls_flower(h, flow);
+		break;
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+				  void *cb_priv)
+{
+	struct hns3_nic_priv *priv = cb_priv;
+
+	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return hns3_setup_tc_cls_flower(priv, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static LIST_HEAD(hns3_block_cb_list);
+
 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			     void *type_data)
 {
-	if (type != TC_SETUP_QDISC_MQPRIO)
-		return -EOPNOTSUPP;
+	struct hns3_nic_priv *priv = netdev_priv(dev);
+	int ret;
 
-	return hns3_setup_tc(dev, type_data);
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		ret = hns3_setup_tc(dev, type_data);
+		break;
+	case TC_SETUP_BLOCK:
+		ret = flow_block_cb_setup_simple(type_data,
+						 &hns3_block_cb_list,
+						 hns3_setup_tc_block_cb,
+						 priv, priv, true);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return ret;
 }
 
 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
@@ -2423,6 +2482,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
+
+	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+		netdev->hw_features |= NETIF_F_HW_TC;
+		netdev->features |= NETIF_F_HW_TC;
+	}
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3980,21 +4044,20 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
 {
 	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
 	int i;
 
 	for (i = 0; i < HNAE3_MAX_TC; i++) {
-		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
 		int j;
 
-		if (!tc_info->enable)
+		if (!test_bit(i, &tc_info->tc_en))
 			continue;
 
-		for (j = 0; j < tc_info->tqp_count; j++) {
+		for (j = 0; j < tc_info->tqp_count[i]; j++) {
 			struct hnae3_queue *q;
 
-			q = priv->ring[tc_info->tqp_offset + j].tqp;
-			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
-				       tc_info->tc);
+			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
+			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
 		}
 	}
 }
@@ -4121,7 +4184,8 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
 	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
 	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
 	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
-	dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
+		 kinfo->tc_info.num_tc);
 	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
 }
 
@@ -4692,6 +4756,12 @@ int hns3_set_channels(struct net_device *netdev,
 	if (ch->rx_count || ch->tx_count)
 		return -EINVAL;
 
+	if (kinfo->tc_info.mqprio_active) {
+		dev_err(&netdev->dev,
+			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
+		return -EINVAL;
+	}
+
 	if (new_tqp_num > hns3_get_max_available_channels(h) ||
 	    new_tqp_num < 1) {
 		dev_err(&netdev->dev,
@@ -359,6 +359,8 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
 		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
 	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
 		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
+		set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
 }
 
 static enum hclge_cmd_status
@@ -518,6 +518,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_SPEED_ABILITY_EXT_M	GENMASK(15, 10)
 #define HCLGE_CFG_UMV_TBL_SPACE_S	16
 #define HCLGE_CFG_UMV_TBL_SPACE_M	GENMASK(31, 16)
+#define HCLGE_CFG_PF_RSS_SIZE_S		0
+#define HCLGE_CFG_PF_RSS_SIZE_M		GENMASK(3, 0)
 
 #define HCLGE_CFG_CMD_CNT		4
 
@@ -558,18 +560,23 @@ struct hclge_rss_input_tuple_cmd {
 };
 
 #define HCLGE_RSS_CFG_TBL_SIZE		16
+#define HCLGE_RSS_CFG_TBL_SIZE_H	4
+#define HCLGE_RSS_CFG_TBL_BW_H		2U
+#define HCLGE_RSS_CFG_TBL_BW_L		8U
 
 struct hclge_rss_indirection_table_cmd {
 	__le16 start_table_index;
 	__le16 rss_set_bitmap;
-	u8 rsv[4];
-	u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
+	u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
+	u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
 };
 
 #define HCLGE_RSS_TC_OFFSET_S		0
-#define HCLGE_RSS_TC_OFFSET_M		GENMASK(9, 0)
+#define HCLGE_RSS_TC_OFFSET_M		GENMASK(10, 0)
+#define HCLGE_RSS_TC_SIZE_MSB_B		11
 #define HCLGE_RSS_TC_SIZE_S		12
 #define HCLGE_RSS_TC_SIZE_M		GENMASK(14, 12)
+#define HCLGE_RSS_TC_SIZE_MSB_OFFSET	3
 #define HCLGE_RSS_TC_VALID_B		15
 struct hclge_rss_tc_mode_cmd {
 	__le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
@@ -1051,6 +1058,9 @@ struct hclge_fd_tcam_config_3_cmd {
 #define HCLGE_FD_AD_WR_RULE_ID_B	0
 #define HCLGE_FD_AD_RULE_ID_S		1
 #define HCLGE_FD_AD_RULE_ID_M		GENMASK(13, 1)
+#define HCLGE_FD_AD_TC_OVRD_B		16
+#define HCLGE_FD_AD_TC_SIZE_S		17
+#define HCLGE_FD_AD_TC_SIZE_M		GENMASK(20, 17)
 
 struct hclge_fd_ad_config_cmd {
 	u8 stage;
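With the widened offset field and the new MSB bit above, each 16-bit rss_tc_mode entry now packs an 11-bit queue offset (bits 0-10), the extra tc_size MSB (bit 11), the low three tc_size bits (bits 12-14) and the valid flag (bit 15). A standalone sketch of that encoding, using the masks defined above (the sample values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define HCLGE_RSS_TC_OFFSET_S		0
#define HCLGE_RSS_TC_OFFSET_M		0x7ffU	/* GENMASK(10, 0) */
#define HCLGE_RSS_TC_SIZE_MSB_B		11
#define HCLGE_RSS_TC_SIZE_S		12
#define HCLGE_RSS_TC_SIZE_M		0x7000U	/* GENMASK(14, 12) */
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET	3
#define HCLGE_RSS_TC_VALID_B		15

static uint16_t rss_tc_mode(int valid, unsigned int tc_size,
			    unsigned int tc_offset)
{
	uint16_t mode = 0;

	mode |= (valid & 0x1) << HCLGE_RSS_TC_VALID_B;
	mode |= (tc_size << HCLGE_RSS_TC_SIZE_S) & HCLGE_RSS_TC_SIZE_M;
	mode |= ((tc_size >> HCLGE_RSS_TC_SIZE_MSB_OFFSET) & 0x1)
		<< HCLGE_RSS_TC_SIZE_MSB_B;
	mode |= (tc_offset << HCLGE_RSS_TC_OFFSET_S) & HCLGE_RSS_TC_OFFSET_M;
	return mode;
}

int main(void)
{
	/* tc_size = 9 (512 queues) needs the MSB bit; offset 1024 needs bit 10 */
	printf("0x%04x\n", rss_tc_mode(1, 9, 1024));	/* prints 0x9c00 */
	return 0;
}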
@@ -397,32 +397,130 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
 	return 0;
 }
 
+static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
+				   struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+	u16 queue_sum = 0;
+	int ret;
+	int i;
+
+	if (!mqprio_qopt->qopt.num_tc) {
+		mqprio_qopt->qopt.num_tc = 1;
+		return 0;
+	}
+
+	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
+					mqprio_qopt->qopt.prio_tc_map);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
+			dev_err(&hdev->pdev->dev,
+				"qopt queue count must be a power of 2\n");
+			return -EINVAL;
+		}
+
+		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
+			dev_err(&hdev->pdev->dev,
+				"qopt queue count should be no more than %u\n",
+				hdev->pf_rss_size_max);
+			return -EINVAL;
+		}
+
+		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
+			dev_err(&hdev->pdev->dev,
+				"qopt queue offset must start from 0 and be continuous\n");
+			return -EINVAL;
+		}
+
+		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
+			dev_err(&hdev->pdev->dev,
+				"qopt tx_rate is not supported\n");
+			return -EOPNOTSUPP;
+		}
+
+		queue_sum = mqprio_qopt->qopt.offset[i];
+		queue_sum += mqprio_qopt->qopt.count[i];
+	}
+
+	if (hdev->vport[0].alloc_tqps < queue_sum) {
+		dev_err(&hdev->pdev->dev,
+			"qopt queue count sum should be less than %u\n",
+			hdev->vport[0].alloc_tqps);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
+				   struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+	int i;
+
+	memset(tc_info, 0, sizeof(*tc_info));
+	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
+	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
+	       sizeof_field(struct hnae3_tc_info, prio_tc));
+	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
+	       sizeof_field(struct hnae3_tc_info, tqp_count));
+	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
+	       sizeof_field(struct hnae3_tc_info, tqp_offset));
+
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
+}
+
+static int hclge_config_tc(struct hclge_dev *hdev,
+			   struct hnae3_tc_info *tc_info)
+{
+	int i;
+
+	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
+
+	return hclge_map_update(hdev);
+}
+
 /* Set up TC for hardware offloaded mqprio in channel mode */
-static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
+static int hclge_setup_tc(struct hnae3_handle *h,
+			  struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
 	struct hclge_vport *vport = hclge_get_vport(h);
+	struct hnae3_knic_private_info *kinfo;
 	struct hclge_dev *hdev = vport->back;
+	struct hnae3_tc_info old_tc_info;
+	u8 tc = mqprio_qopt->qopt.num_tc;
 	int ret;
 
+	/* if the client is unregistered, it's not allowed to change
+	 * the mqprio configuration, since that may cause ring uninit
+	 * to fail.
+	 */
+	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+		return -EBUSY;
+
 	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
 		return -EINVAL;
 
-	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
-	if (ret)
-		return -EINVAL;
+	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to check mqprio qopt params, ret = %d\n", ret);
+		return ret;
+	}
 
 	ret = hclge_notify_down_uinit(hdev);
 	if (ret)
 		return ret;
 
-	hclge_tm_schd_info_update(hdev, tc);
-	hclge_tm_prio_tc_info_update(hdev, prio_tc);
+	kinfo = &vport->nic.kinfo;
+	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+	kinfo->tc_info.mqprio_active = tc > 0;
 
 	ret = hclge_tm_init_hw(hdev, false);
 	if (ret)
 		goto err_out;
 
-	ret = hclge_client_setup_tc(hdev);
+	ret = hclge_config_tc(hdev, &kinfo->tc_info);
 	if (ret)
 		goto err_out;
 
@@ -436,6 +534,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
 	return hclge_notify_init_up(hdev);
 
 err_out:
+	/* roll-back */
+	memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+	if (hclge_config_tc(hdev, &kinfo->tc_info))
+		dev_err(&hdev->pdev->dev,
+			"failed to roll back tc configuration\n");
+
 	hclge_notify_init_up(hdev);
 
 	return ret;
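For reference, a queue layout that passes hclge_mqprio_qopt_check(): per-TC counts must each be a power of 2 and no more than pf_rss_size_max, offsets must be contiguous starting at 0, per-TC rates must be unset, and the total must fit in the vport's allocated TQPs. A userspace mirror of those checks against a hypothetical configuration (pf_rss_size_max and alloc_tqps values are assumptions for the example):

#include <stdio.h>
#include <stdint.h>

static int is_pow2(uint16_t v) { return v && !(v & (v - 1)); }

int main(void)
{
	const uint16_t pf_rss_size_max = 8, alloc_tqps = 16; /* assumed caps */
	const uint16_t count[]  = { 4, 4, 8 };	/* queues per TC, powers of 2 */
	const uint16_t offset[] = { 0, 4, 8 };	/* contiguous from 0 */
	uint16_t queue_sum = 0;

	for (int i = 0; i < 3; i++) {
		if (!is_pow2(count[i]) || count[i] > pf_rss_size_max) {
			printf("TC%d: bad count %u\n", i, count[i]);
			return 1;
		}
		if (offset[i] != queue_sum) {
			printf("TC%d: bad offset %u\n", i, offset[i]);
			return 1;
		}
		queue_sum = offset[i] + count[i];
	}
	if (queue_sum > alloc_tqps) {
		printf("sum %u exceeds %u TQPs\n", queue_sum, alloc_tqps);
		return 1;
	}
	printf("layout accepted (%u queues)\n", queue_sum);
	return 0;
}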
@@ -1454,7 +1454,7 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
 
 		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
 
-		for (i = 0; i < kinfo->num_tc; i++) {
+		for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 			u16 qsid = vport->qs_offset + i;
 
 			hclge_dbg_dump_qs_shaper_single(hdev, qsid);
@@ -1285,9 +1285,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
 					     HCLGE_CFG_DEFAULT_SPEED_M,
 					     HCLGE_CFG_DEFAULT_SPEED_S);
-	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
-					    HCLGE_CFG_RSS_SIZE_M,
-					    HCLGE_CFG_RSS_SIZE_S);
+	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+					       HCLGE_CFG_RSS_SIZE_M,
+					       HCLGE_CFG_RSS_SIZE_S);
 
 	for (i = 0; i < ETH_ALEN; i++)
 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1308,6 +1308,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 					       HCLGE_CFG_UMV_TBL_SPACE_S);
 	if (!cfg->umv_space)
 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+
+	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
+					       HCLGE_CFG_PF_RSS_SIZE_M,
+					       HCLGE_CFG_PF_RSS_SIZE_S);
+
+	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size, which is a
+	 * power of 2, instead of the value itself. This is more flexible
+	 * for future changes and expansions.
+	 * When the VF max rss size field is HCLGE_CFG_RSS_SIZE_S, it does
+	 * not make sense if the PF's field is 0. In that case, PF and VF
+	 * have the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
+	 */
+	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
+			       1U << cfg->pf_rss_size_max :
+			       cfg->vf_rss_size_max;
 }
 
 /* hclge_get_cfg: query the static parameter from flash
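In other words, the firmware field now carries log2 of the PF maximum RSS size rather than the size itself, with 0 meaning "fall back to the VF field". A one-line decode of that rule, with sample values:

#include <stdio.h>

int main(void)
{
	unsigned int vf_rss_size_max = 16;	/* read directly, not log2 */

	/* field value 0 falls back to the VF size; otherwise 1 << field */
	for (unsigned int field = 0; field <= 4; field++) {
		unsigned int pf = field ? 1U << field : vf_rss_size_max;
		printf("field=%u -> pf_rss_size_max=%u\n", field, pf);
	}
	return 0;
}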
@@ -1469,7 +1484,8 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
 	hdev->base_tqp_pid = 0;
-	hdev->rss_size_max = cfg.rss_size_max;
+	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
+	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
 	hdev->rx_buf_len = cfg.rx_buf_len;
 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
 	hdev->hw.mac.media_type = cfg.media_type;
@@ -1652,7 +1668,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
 		}
 	}
 	vport->alloc_tqps = alloced;
-	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
 				vport->alloc_tqps / hdev->tm_info.num_tc);
 
 	/* ensure one to one mapping between irq and queue at default */
@@ -4262,12 +4278,16 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
 	return 0;
 }
 
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
+static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
 {
 	struct hclge_rss_indirection_table_cmd *req;
 	struct hclge_desc desc;
-	int i, j;
+	u8 rss_msb_oft;
+	u8 rss_msb_val;
 	int ret;
+	u16 qid;
+	int i;
+	u32 j;
 
 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
 
@@ -4278,11 +4298,15 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
 		req->start_table_index =
 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
-
-		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
-			req->rss_result[j] =
-				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
-
+		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
+			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
+			req->rss_qid_l[j] = qid & 0xff;
+			rss_msb_oft =
+				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
+				      (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+		}
 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
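Each indirection-table entry is now a 10-bit queue id: the low byte goes into rss_qid_l[j], and the single MSB for entry j is packed into rss_qid_h, with HCLGE_RSS_CFG_TBL_BW_H = 2 bits reserved per entry (only one used here). A worked example of the packing arithmetic from the loop above:

#include <stdio.h>
#include <stdint.h>

#define HCLGE_RSS_CFG_TBL_SIZE		16
#define HCLGE_RSS_CFG_TBL_SIZE_H	4
#define HCLGE_RSS_CFG_TBL_BW_H		2U
#define HCLGE_RSS_CFG_TBL_BW_L		8U
#define BITS_PER_BYTE			8

int main(void)
{
	uint8_t rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE] = { 0 };
	uint8_t rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H] = { 0 };
	uint16_t qid = 300;	/* 10-bit queue id, > 255 */
	unsigned int j = 5;	/* entry index within this descriptor */

	rss_qid_l[j] = qid & 0xff;
	rss_qid_h[j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE] |=
		((qid >> HCLGE_RSS_CFG_TBL_BW_L) & 0x1)
		<< (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);

	printf("qid_l[5]=0x%02x qid_h[1]=0x%02x\n", rss_qid_l[5], rss_qid_h[1]);
	return 0;	/* prints qid_l[5]=0x2c qid_h[1]=0x04 */
}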
@@ -4311,6 +4335,8 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
+			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
 
@@ -4601,21 +4627,58 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
-	return hdev->rss_size_max;
+	return hdev->pf_rss_size_max;
+}
+
+static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
+{
+	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+	struct hclge_vport *vport = hdev->vport;
+	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
+	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+	struct hnae3_tc_info *tc_info;
+	u16 roundup_size;
+	u16 rss_size;
+	int i;
+
+	tc_info = &vport->nic.kinfo.tc_info;
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+		rss_size = tc_info->tqp_count[i];
+		tc_valid[i] = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		/* tc_size set to hardware is the log2 of roundup power of two
+		 * of rss_size, the actual queue size is limited by indirection
+		 * table.
+		 */
+		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
+		    rss_size == 0) {
+			dev_err(&hdev->pdev->dev,
+				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
+				rss_size);
+			return -EINVAL;
+		}
+
+		roundup_size = roundup_pow_of_two(rss_size);
+		roundup_size = ilog2(roundup_size);
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = tc_info->tqp_offset[i];
+	}
+
+	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
 }
 
 int hclge_rss_init_hw(struct hclge_dev *hdev)
 {
 	struct hclge_vport *vport = hdev->vport;
-	u8 *rss_indir = vport[0].rss_indirection_tbl;
-	u16 rss_size = vport[0].alloc_rss_size;
-	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
-	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+	u16 *rss_indir = vport[0].rss_indirection_tbl;
 	u8 *key = vport[0].rss_hash_key;
 	u8 hfunc = vport[0].rss_algo;
-	u16 tc_valid[HCLGE_MAX_TC_NUM];
-	u16 roundup_size;
-	unsigned int i;
 	int ret;
 
 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
@@ -4630,32 +4693,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
 	if (ret)
 		return ret;
 
-	/* Each TC have the same queue size, and tc_size set to hardware is
-	 * the log2 of roundup power of two of rss_size, the acutal queue
-	 * size is limited by indirection table.
-	 */
-	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
-		dev_err(&hdev->pdev->dev,
-			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
-			rss_size);
-		return -EINVAL;
-	}
-
-	roundup_size = roundup_pow_of_two(rss_size);
-	roundup_size = ilog2(roundup_size);
-
-	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		tc_valid[i] = 0;
-
-		if (!(hdev->hw_tc_map & BIT(i)))
-			continue;
-
-		tc_valid[i] = 1;
-		tc_size[i] = roundup_size;
-		tc_offset[i] = rss_size * i;
-	}
-
-	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+	return hclge_init_rss_tc_mode(hdev);
 }
 
 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
@@ -5087,6 +5125,7 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
 			      struct hclge_fd_ad_data *action)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 	struct hclge_fd_ad_config_cmd *req;
 	struct hclge_desc desc;
 	u64 ad_data = 0;
@@ -5102,6 +5141,12 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
 			      action->write_rule_id_to_bd);
 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
 			action->rule_id);
+	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
+			      action->override_tc);
+		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
+				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
+	}
 	ad_data <<= 32;
 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
@@ -5345,16 +5390,22 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
 			       struct hclge_fd_rule *rule)
 {
+	struct hclge_vport *vport = hdev->vport;
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
 	struct hclge_fd_ad_data ad_data;
 
+	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
 	ad_data.ad_id = rule->location;
 
 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
 		ad_data.drop_packet = true;
 		ad_data.forward_to_direct_queue = false;
 		ad_data.queue_id = 0;
+	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
+		ad_data.override_tc = true;
+		ad_data.queue_id =
+			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
+		ad_data.tc_size =
+			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
 	} else {
 		ad_data.drop_packet = false;
 		ad_data.forward_to_direct_queue = true;
 		ad_data.queue_id = rule->queue_id;
 	}
@@ -5871,6 +5922,14 @@ clear_rule:
 	return ret;
 }
 
+static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
+}
+
 static int hclge_add_fd_entry(struct hnae3_handle *handle,
 			      struct ethtool_rxnfc *cmd)
 {
@@ -5895,6 +5954,12 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 		return -EOPNOTSUPP;
 	}
 
+	if (hclge_is_cls_flower_active(handle)) {
+		dev_err(&hdev->pdev->dev,
+			"please delete all existing cls flower rules first\n");
+		return -EINVAL;
+	}
+
 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
 
 	ret = hclge_fd_check_spec(hdev, fs, &unused);
@@ -5925,7 +5990,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 			return -EINVAL;
 		}
 
-		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+		action = HCLGE_FD_ACTION_SELECT_QUEUE;
 		q_index = ring;
 	}
 
@@ -5976,7 +6041,8 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
 		return -EINVAL;
 
-	if (!hclge_fd_rule_exist(hdev, fs->location)) {
+	if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
+	    !hclge_fd_rule_exist(hdev, fs->location)) {
 		dev_err(&hdev->pdev->dev,
 			"Delete fail, rule %u does not exist\n", fs->location);
 		return -ENOENT;
@@ -6076,7 +6142,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
-	if (!hnae3_dev_fd_supported(hdev))
+	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
 		return -EOPNOTSUPP;
 
 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6419,7 +6485,8 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 	 * arfs should not work
 	 */
 	spin_lock_bh(&hdev->fd_rule_lock);
-	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
+	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
 		spin_unlock_bh(&hdev->fd_rule_lock);
 		return -EOPNOTSUPP;
 	}
@@ -6447,7 +6514,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 
 		set_bit(bit_id, hdev->fd_bmap);
 		rule->location = bit_id;
-		rule->flow_id = flow_id;
+		rule->arfs.flow_id = flow_id;
 		rule->queue_id = queue_id;
 		hclge_fd_build_arfs_rule(&new_tuples, rule);
 		ret = hclge_fd_config_rule(hdev, rule);
@@ -6491,7 +6558,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
 	}
 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
-					rule->flow_id, rule->location)) {
+					rule->arfs.flow_id, rule->location)) {
 			hlist_del_init(&rule->rule_node);
 			hlist_add_head(&rule->rule_node, &del_list);
 			hdev->hclge_fd_rule_num--;
@@ -6520,6 +6587,286 @@ static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 #endif
 }
 
+static void hclge_get_cls_key_basic(const struct flow_rule *flow,
+				    struct hclge_fd_rule *rule)
+{
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+		u16 ethtype_key, ethtype_mask;
+
+		flow_rule_match_basic(flow, &match);
+		ethtype_key = ntohs(match.key->n_proto);
+		ethtype_mask = ntohs(match.mask->n_proto);
+
+		if (ethtype_key == ETH_P_ALL) {
+			ethtype_key = 0;
+			ethtype_mask = 0;
+		}
+		rule->tuples.ether_proto = ethtype_key;
+		rule->tuples_mask.ether_proto = ethtype_mask;
+		rule->tuples.ip_proto = match.key->ip_proto;
+		rule->tuples_mask.ip_proto = match.mask->ip_proto;
+	} else {
+		rule->unused_tuple |= BIT(INNER_IP_PROTO);
+		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
+	}
+}
+
+static void hclge_get_cls_key_mac(const struct flow_rule *flow,
+				  struct hclge_fd_rule *rule)
+{
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
+
+		flow_rule_match_eth_addrs(flow, &match);
+		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
+		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
+		ether_addr_copy(rule->tuples.src_mac, match.key->src);
+		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
+	} else {
+		rule->unused_tuple |= BIT(INNER_DST_MAC);
+		rule->unused_tuple |= BIT(INNER_SRC_MAC);
+	}
+}
+
+static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
+				   struct hclge_fd_rule *rule)
+{
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
+
+		flow_rule_match_vlan(flow, &match);
+		rule->tuples.vlan_tag1 = match.key->vlan_id |
+				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
+		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
+				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
+	} else {
+		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+	}
+}
+
+static void hclge_get_cls_key_ip(const struct flow_rule *flow,
+				 struct hclge_fd_rule *rule)
+{
+	u16 addr_type = 0;
+
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
+
+		flow_rule_match_control(flow, &match);
+		addr_type = match.key->addr_type;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_ipv4_addrs(flow, &match);
+		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
+		rule->tuples_mask.src_ip[IPV4_INDEX] =
+				be32_to_cpu(match.mask->src);
+		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
+		rule->tuples_mask.dst_ip[IPV4_INDEX] =
+				be32_to_cpu(match.mask->dst);
+	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_ipv6_addrs(flow, &match);
+		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
+				  IPV6_SIZE);
+		be32_to_cpu_array(rule->tuples_mask.src_ip,
+				  match.mask->src.s6_addr32, IPV6_SIZE);
+		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
+				  IPV6_SIZE);
+		be32_to_cpu_array(rule->tuples_mask.dst_ip,
+				  match.mask->dst.s6_addr32, IPV6_SIZE);
+	} else {
+		rule->unused_tuple |= BIT(INNER_SRC_IP);
+		rule->unused_tuple |= BIT(INNER_DST_IP);
+	}
+}
+
+static void hclge_get_cls_key_port(const struct flow_rule *flow,
+				   struct hclge_fd_rule *rule)
+{
+	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_ports(flow, &match);
+
+		rule->tuples.src_port = be16_to_cpu(match.key->src);
+		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
+		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
+		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
+	} else {
+		rule->unused_tuple |= BIT(INNER_SRC_PORT);
+		rule->unused_tuple |= BIT(INNER_DST_PORT);
+	}
+}
+
+static int hclge_parse_cls_flower(struct hclge_dev *hdev,
+				  struct flow_cls_offload *cls_flower,
+				  struct hclge_fd_rule *rule)
+{
+	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+	struct flow_dissector *dissector = flow->match.dissector;
+
+	if (dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
+			dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	hclge_get_cls_key_basic(flow, rule);
+	hclge_get_cls_key_mac(flow, rule);
+	hclge_get_cls_key_vlan(flow, rule);
+	hclge_get_cls_key_ip(flow, rule);
+	hclge_get_cls_key_port(flow, rule);
+
+	return 0;
+}
+
+static int hclge_check_cls_flower(struct hclge_dev *hdev,
+				  struct flow_cls_offload *cls_flower, int tc)
+{
+	u32 prio = cls_flower->common.prio;
+
+	if (tc < 0 || tc > hdev->tc_max) {
+		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
+		return -EINVAL;
+	}
+
+	if (prio == 0 ||
+	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+		dev_err(&hdev->pdev->dev,
+			"prio %u should be in range[1, %u]\n",
+			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+		return -EINVAL;
+	}
+
+	if (test_bit(prio - 1, hdev->fd_bmap)) {
+		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int hclge_add_cls_flower(struct hnae3_handle *handle,
+				struct flow_cls_offload *cls_flower,
+				int tc)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_fd_rule *rule;
+	int ret;
+
+	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+		dev_err(&hdev->pdev->dev,
+			"please remove all existing fd rules via ethtool first\n");
+		return -EINVAL;
+	}
+
+	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to check cls flower params, ret = %d\n", ret);
+		return ret;
+	}
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
+	if (ret)
+		goto err;
+
+	rule->action = HCLGE_FD_ACTION_SELECT_TC;
+	rule->cls_flower.tc = tc;
+	rule->location = cls_flower->common.prio - 1;
+	rule->vf_id = 0;
+	rule->cls_flower.cookie = cls_flower->cookie;
+	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
+
+	spin_lock_bh(&hdev->fd_rule_lock);
+	hclge_clear_arfs_rules(handle);
+
+	ret = hclge_fd_config_rule(hdev, rule);
+
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to add cls flower rule, ret = %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	kfree(rule);
+	return ret;
+}
+
+static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
+						   unsigned long cookie)
+{
+	struct hclge_fd_rule *rule;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+		if (rule->cls_flower.cookie == cookie)
+			return rule;
+	}
+
+	return NULL;
+}
+
+static int hclge_del_cls_flower(struct hnae3_handle *handle,
+				struct flow_cls_offload *cls_flower)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_fd_rule *rule;
+	int ret;
+
+	spin_lock_bh(&hdev->fd_rule_lock);
+
+	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
+	if (!rule) {
+		spin_unlock_bh(&hdev->fd_rule_lock);
+		return -EINVAL;
+	}
+
+	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+				   NULL, false);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to delete cls flower rule %u, ret = %d\n",
+			rule->location, ret);
+		spin_unlock_bh(&hdev->fd_rule_lock);
+		return ret;
+	}
+
+	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to delete cls flower rule %u in list, ret = %d\n",
+			rule->location, ret);
+		spin_unlock_bh(&hdev->fd_rule_lock);
+		return ret;
+	}
+
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
+	return 0;
+}
+
 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -10694,12 +11041,10 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
 {
-	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
-	return min_t(u32, hdev->rss_size_max,
-		     vport->alloc_tqps / kinfo->num_tc);
+	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
 }
 
 static void hclge_get_channels(struct hnae3_handle *handle,
@@ -10718,7 +11063,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
 	struct hclge_dev *hdev = vport->back;
 
 	*alloc_tqps = vport->alloc_tqps;
-	*max_rss_size = hdev->rss_size_max;
+	*max_rss_size = hdev->pf_rss_size_max;
 }
 
 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
@@ -10786,7 +11131,7 @@ out:
 	dev_info(&hdev->pdev->dev,
 		 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
 		 cur_rss_size, kinfo->rss_size,
-		 cur_tqps, kinfo->rss_size * kinfo->num_tc);
+		 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
 
 	return ret;
 }
@@ -11519,6 +11864,9 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_vf_mac = hclge_set_vf_mac,
 	.get_module_eeprom = hclge_get_module_eeprom,
 	.get_cmdq_stat = hclge_get_cmdq_stat,
+	.add_cls_flower = hclge_add_cls_flower,
+	.del_cls_flower = hclge_del_cls_flower,
+	.cls_flower_active = hclge_is_cls_flower_active,
 };
 
 static struct hnae3_ae_algo ae_algo = {
@@ -348,7 +348,8 @@ struct hclge_cfg {
 	u8 tc_num;
 	u16 tqp_desc_num;
 	u16 rx_buf_len;
-	u16 rss_size_max;
+	u16 vf_rss_size_max;
+	u16 pf_rss_size_max;
 	u8 phy_addr;
 	u8 media_type;
 	u8 mac_addr[ETH_ALEN];
@@ -564,6 +565,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE {
 	HCLGE_FD_RULE_NONE,
 	HCLGE_FD_ARFS_ACTIVE,
 	HCLGE_FD_EP_ACTIVE,
+	HCLGE_FD_TC_FLOWER_ACTIVE,
 };
 
 enum HCLGE_FD_PACKET_TYPE {
@@ -572,8 +574,9 @@ enum HCLGE_FD_PACKET_TYPE {
 };
 
 enum HCLGE_FD_ACTION {
-	HCLGE_FD_ACTION_ACCEPT_PACKET,
+	HCLGE_FD_ACTION_SELECT_QUEUE,
 	HCLGE_FD_ACTION_DROP_PACKET,
+	HCLGE_FD_ACTION_SELECT_TC,
 };
 
 struct hclge_fd_key_cfg {
@@ -618,12 +621,20 @@ struct hclge_fd_rule {
 	struct hclge_fd_rule_tuples tuples_mask;
 	u32 unused_tuple;
 	u32 flow_type;
-	u8 action;
-	u16 vf_id;
+	union {
+		struct {
+			unsigned long cookie;
+			u8 tc;
+		} cls_flower;
+		struct {
+			u16 flow_id; /* only used for arfs */
+		} arfs;
+	};
 	u16 queue_id;
+	u16 vf_id;
 	u16 location;
-	u16 flow_id; /* only used for arfs */
 	enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+	u8 action;
 };
 
 struct hclge_fd_ad_data {
@@ -637,6 +648,8 @@ struct hclge_fd_ad_data {
 	u8 write_rule_id_to_bd;
 	u8 next_input_key;
 	u16 rule_id;
+	u16 tc_size;
+	u8 override_tc;
 };
 
 enum HCLGE_MAC_NODE_STATE {
@@ -745,7 +758,8 @@ struct hclge_dev {
 
 	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
 	u16 alloc_rss_size;	/* Allocated RSS task queue */
-	u16 rss_size_max;	/* HW defined max RSS task queue */
+	u16 vf_rss_size_max;	/* HW defined VF max RSS task queue */
+	u16 pf_rss_size_max;	/* HW defined PF max RSS task queue */
 
 	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
 	u16 num_alloc_vport;	  /* Num vports this driver supports */
@@ -906,7 +920,7 @@ struct hclge_vport {
 
 	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
 	/* User configured lookup table entries */
-	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+	u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
 	int rss_algo;		/* User configured hash algorithm */
 	/* User configured rss tuple sets */
 	struct hclge_rss_tuple_cfg rss_tuple_sets;
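The per-rule scratch data in hclge_fd_rule is now a union, discriminated in practice by how the rule was created: arfs rules use arfs.flow_id, tc flower rules use cls_flower.cookie and cls_flower.tc. A compact illustration of that discriminated-access pattern (types simplified to plain C; the enum name is hypothetical, standing in for rule_type):

#include <stdio.h>

enum fd_rule_src { RULE_ARFS, RULE_CLS_FLOWER };

struct fd_rule {
	enum fd_rule_src src;	/* stands in for rule_type */
	union {
		struct { unsigned long cookie; unsigned char tc; } cls_flower;
		struct { unsigned short flow_id; } arfs;
	};
};

int main(void)
{
	struct fd_rule r = { .src = RULE_CLS_FLOWER,
			     .cls_flower = { .cookie = 0xabcd, .tc = 2 } };

	if (r.src == RULE_CLS_FLOWER)	/* only touch the active member */
		printf("flower rule: cookie=%#lx tc=%u\n",
		       r.cls_flower.cookie, r.cls_flower.tc);
	return 0;
}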
@@ -414,7 +414,7 @@ static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
 	unsigned int i;
 
-	for (i = 0; i < kinfo->num_tc; i++)
+	for (i = 0; i < kinfo->tc_info.num_tc; i++)
 		resp_msg->data[0] |= BIT(i);
 
 	resp_msg->len = sizeof(u8);
@@ -565,7 +565,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
 						 HCLGE_SHAPER_BS_U_DEF,
 						 HCLGE_SHAPER_BS_S_DEF);
 
-	for (i = 0; i < kinfo->num_tc; i++) {
+	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
 					   false);
 
@@ -589,23 +589,66 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
 	return 0;
 }
 
+static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+	struct hclge_dev *hdev = vport->back;
+	u16 max_rss_size = 0;
+	int i;
+
+	if (!tc_info->mqprio_active)
+		return vport->alloc_tqps / tc_info->num_tc;
+
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
+		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
+			continue;
+		if (max_rss_size < tc_info->tqp_count[i])
+			max_rss_size = tc_info->tqp_count[i];
+	}
+
+	return max_rss_size;
+}
+
+static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+	struct hclge_dev *hdev = vport->back;
+	int sum = 0;
+	int i;
+
+	if (!tc_info->mqprio_active)
+		return kinfo->rss_size * tc_info->num_tc;
+
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
+		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
+			sum += tc_info->tqp_count[i];
+	}
+
+	return sum;
+}
+
 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
 	struct hclge_dev *hdev = vport->back;
+	u16 vport_max_rss_size;
 	u16 max_rss_size;
 	u8 i;
 
 	/* TC configuration is shared by PF/VF in one port, only allow
 	 * one tc for VF for simplicity. VF's vport_id is non-zero.
 	 */
-	kinfo->num_tc = vport->vport_id ? 1 :
+	kinfo->tc_info.num_tc = vport->vport_id ? 1 :
 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
 	vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
 			   (vport->vport_id ? (vport->vport_id - 1) : 0);
 
-	max_rss_size = min_t(u16, hdev->rss_size_max,
-			     vport->alloc_tqps / kinfo->num_tc);
+	vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
+					       hdev->pf_rss_size_max;
+	max_rss_size = min_t(u16, vport_max_rss_size,
+			     hclge_vport_get_max_rss_size(vport));
 
 	/* Set to user value, no larger than max_rss_size. */
 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
@@ -622,34 +665,36 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 		if (!kinfo->req_rss_size)
 			max_rss_size = min_t(u16, max_rss_size,
 					     (hdev->num_nic_msi - 1) /
-					     kinfo->num_tc);
+					     kinfo->tc_info.num_tc);
 
 		/* Set to the maximum specification value (max_rss_size). */
 		kinfo->rss_size = max_rss_size;
 	}
 
-	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
 	vport->dwrr = 100;  /* 100 percent as init */
 	vport->alloc_rss_size = kinfo->rss_size;
 	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
 
+	/* when mqprio is enabled, tc_info has already been updated. */
+	if (kinfo->tc_info.mqprio_active)
+		return;
+
 	for (i = 0; i < HNAE3_MAX_TC; i++) {
-		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
-			kinfo->tc_info[i].enable = true;
-			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
-			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
-			kinfo->tc_info[i].tc = i;
+		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
+			set_bit(i, &kinfo->tc_info.tc_en);
+			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
+			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
 		} else {
 			/* Set to default queue if TC is disabled */
-			kinfo->tc_info[i].enable = false;
-			kinfo->tc_info[i].tqp_offset = 0;
-			kinfo->tc_info[i].tqp_count = 1;
-			kinfo->tc_info[i].tc = 0;
+			clear_bit(i, &kinfo->tc_info.tc_en);
+			kinfo->tc_info.tqp_offset[i] = 0;
+			kinfo->tc_info.tqp_count[i] = 1;
 		}
 	}
 
-	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
-	       sizeof_field(struct hnae3_knic_private_info, prio_tc));
+	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
+	       sizeof_field(struct hnae3_tc_info, prio_tc));
 }
 
 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -854,15 +899,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
 				   struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
 	struct hnae3_queue **tqp = kinfo->tqp;
-	struct hnae3_tc_info *v_tc_info;
 	u32 i, j;
 	int ret;
 
-	for (i = 0; i < kinfo->num_tc; i++) {
-		v_tc_info = &kinfo->tc_info[i];
-		for (j = 0; j < v_tc_info->tqp_count; j++) {
-			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
+	for (i = 0; i < tc_info->num_tc; i++) {
+		for (j = 0; j < tc_info->tqp_count[i]; j++) {
+			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
 
 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
 						       hclge_get_queue_id(q),
@@ -887,7 +931,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
 			struct hnae3_knic_private_info *kinfo =
 				&vport[k].nic.kinfo;
 
-			for (i = 0; i < kinfo->num_tc; i++) {
+			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 				ret = hclge_tm_qs_to_pri_map_cfg(
 					hdev, vport[k].qs_offset + i, i);
 				if (ret)
@@ -1001,7 +1045,7 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
 	u32 i;
 	int ret;
 
-	for (i = 0; i < kinfo->num_tc; i++) {
+	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
 					     HCLGE_SHAPER_LVL_QSET,
 					     &ir_para, max_tm_rate);
@@ -1123,7 +1167,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
 		return ret;
 
 	/* Qset dwrr */
-	for (i = 0; i < kinfo->num_tc; i++) {
+	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 		ret = hclge_tm_qs_weight_cfg(
 			hdev, vport->qs_offset + i,
 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
@@ -1254,7 +1298,7 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
 	if (ret)
 		return ret;
 
-	for (i = 0; i < kinfo->num_tc; i++) {
+	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
 		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
 
 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
@@ -1484,7 +1528,7 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
 
 		for (k = 0; k < hdev->num_alloc_vport; k++) {
 			kinfo = &vport[k].nic.kinfo;
-			kinfo->prio_tc[i] = prio_tc[i];
+			kinfo->tc_info.prio_tc[i] = prio_tc[i];
 		}
 	}
 }
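Under mqprio the per-TC queue counts may differ, so the vport helpers above take a maximum and a sum over the enabled TCs instead of assuming rss_size * num_tc. With the example layout used earlier (counts 4, 4, 8), max_rss_size is 8 and num_tqps is 16. A tiny standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned short tqp_count[] = { 4, 4, 8 };
	unsigned short max_rss = 0, sum = 0;

	for (int i = 0; i < 3; i++) {	/* enabled TCs only */
		if (tqp_count[i] > max_rss)
			max_rss = tqp_count[i];
		sum += tqp_count[i];
	}
	printf("max_rss_size=%u num_tqps=%u\n", max_rss, sum);
	return 0;
}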
@@ -433,19 +433,20 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
 	struct hnae3_knic_private_info *kinfo;
 	u16 new_tqps = hdev->num_tqps;
 	unsigned int i;
+	u8 num_tc = 0;
 
 	kinfo = &nic->kinfo;
-	kinfo->num_tc = 0;
 	kinfo->num_tx_desc = hdev->num_tx_desc;
 	kinfo->num_rx_desc = hdev->num_rx_desc;
 	kinfo->rx_buf_len = hdev->rx_buf_len;
 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
 		if (hdev->hw_tc_map & BIT(i))
-			kinfo->num_tc++;
+			num_tc++;
 
-	kinfo->rss_size
-		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
-	new_tqps = kinfo->rss_size * kinfo->num_tc;
+	num_tc = num_tc ? num_tc : 1;
+	kinfo->tc_info.num_tc = num_tc;
+	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
+	new_tqps = kinfo->rss_size * num_tc;
 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
 
 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
@@ -463,7 +464,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
 	 * and rss size with the actual vector numbers
 	 */
 	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
-	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
 				kinfo->rss_size);
 
 	return 0;
@@ -3360,7 +3361,7 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
 
 	return min_t(u32, hdev->rss_size_max,
-		     hdev->num_tqps / kinfo->num_tc);
+		     hdev->num_tqps / kinfo->tc_info.num_tc);
 }
 
 /**
@@ -3403,7 +3404,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle,
 		kinfo->req_rss_size = new_tqps_num;
 
 	max_rss_size = min_t(u16, hdev->rss_size_max,
-			     hdev->num_tqps / kinfo->num_tc);
+			     hdev->num_tqps / kinfo->tc_info.num_tc);
 
 	/* Use the user's configuration when it is not larger than
 	 * max_rss_size, otherwise, use the maximum specification value.
@@ -3415,7 +3416,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle,
 	    (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
 		kinfo->rss_size = max_rss_size;
 
-	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
 }
 
 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
@@ -3461,7 +3462,7 @@ out:
 	dev_info(&hdev->pdev->dev,
 		 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
 		 cur_rss_size, kinfo->rss_size,
-		 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
+		 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
 
 	return ret;
 }