Merge branch 'hns3-next'
Huazhong Tan says:

====================
net: hns3: some optimizations related to work task

This series refactors the work tasks of the HNS3 ethernet driver.

[patch 1/5] uses a delayed workqueue instead of a timer to drive the
hclgevf_service task, which simplifies the code.
[patch 2/5] & [patch 3/5] unify the existing mailbox, reset and service
work items into a single service task.
[patch 4/5] allocates a private workqueue with WQ_MEM_RECLAIM for the
HNS3 driver.
[patch 5/5] adds a new flag to indicate whether a reset has failed, and
stops scheduling the service task for periodic work while this flag is
set.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
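To make the shape of the refactor concrete before the diff, here is a
minimal, self-contained sketch of the pattern the series converges on:
one driver-private WQ_MEM_RECLAIM workqueue plus a single delayed work
whose handler multiplexes the reset and periodic subtasks, with
periodic rescheduling gated on a reset-failure flag. All sketch_*
names are illustrative inventions by the editor, not the driver code;
only the workqueue and bitops APIs are the kernel's.

	/* editor's sketch of the pattern, under the assumptions above */
	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	enum sketch_state_bits {
		SKETCH_STATE_REMOVING,
		SKETCH_STATE_RST_SERVICE_SCHED,
		SKETCH_STATE_RST_FAIL,
	};

	static struct workqueue_struct *sketch_wq;

	static struct sketch_dev {
		unsigned long state;
		struct delayed_work service_task;	/* one task for everything */
	} sdev;

	/* a reset request just sets a flag and kicks the task immediately */
	static void sketch_reset_task_schedule(struct sketch_dev *dev)
	{
		if (!test_bit(SKETCH_STATE_REMOVING, &dev->state) &&
		    !test_and_set_bit(SKETCH_STATE_RST_SERVICE_SCHED, &dev->state))
			mod_delayed_work(sketch_wq, &dev->service_task, 0);
	}

	/* periodic rescheduling is suppressed once a reset has failed (patch 5/5) */
	static void sketch_task_schedule(struct sketch_dev *dev, unsigned long delay)
	{
		if (!test_bit(SKETCH_STATE_REMOVING, &dev->state) &&
		    !test_bit(SKETCH_STATE_RST_FAIL, &dev->state))
			mod_delayed_work(sketch_wq, &dev->service_task, delay);
	}

	static void sketch_service_task(struct work_struct *work)
	{
		struct sketch_dev *dev =
			container_of(work, struct sketch_dev, service_task.work);

		/* reset work first, if it was requested */
		if (test_and_clear_bit(SKETCH_STATE_RST_SERVICE_SCHED, &dev->state))
			pr_info("sketch: handling reset\n");

		/* periodic work, then re-arm ourselves */
		sketch_task_schedule(dev, round_jiffies_relative(HZ));
	}

	static int __init sketch_init(void)
	{
		/* WQ_MEM_RECLAIM keeps a rescuer thread so the service task
		 * can make forward progress under memory pressure
		 */
		sketch_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "sketch");
		if (!sketch_wq)
			return -ENOMEM;

		INIT_DELAYED_WORK(&sdev.service_task, sketch_service_task);
		sketch_task_schedule(&sdev, round_jiffies_relative(HZ));
		return 0;
	}

	static void __exit sketch_exit(void)
	{
		set_bit(SKETCH_STATE_REMOVING, &sdev.state);
		cancel_delayed_work_sync(&sdev.service_task);
		destroy_workqueue(sketch_wq);
	}

	module_init(sketch_init);
	module_exit(sketch_exit);
	MODULE_LICENSE("GPL");

Compare this with hclge_service_task() in the diff below, which
additionally re-runs the reset and mailbox subtasks after the periodic
pass so that they are not delayed by it.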
This commit is contained in: commit 366c7bb0ac
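One mechanism worth noting before the diff (an editor's reading of the
patches, not text from the cover letter): every schedule helper now
funnels into the single service_task through mod_delayed_work() or
mod_delayed_work_on(). Unlike queue_work(), mod_delayed_work() on an
already-pending work item simply retimes it, so a reset or mailbox
request with a delay of 0 coalesces with, and pulls forward, a pending
periodic pass instead of queueing a second item:

	/* kick the unified task immediately, shortening any pending delay */
	mod_delayed_work(hclge_wq, &hdev->service_task, 0);

	/* re-arm the periodic pass roughly once per second */
	mod_delayed_work(hclge_wq, &hdev->service_task, round_jiffies_relative(HZ));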
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -976,6 +976,14 @@ void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
 }
 
+static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
+{
+	dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
+		 hdev->last_serv_processed);
+	dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
+		 hdev->serv_processed_cnt);
+}
+
 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
 {
 	struct hclge_desc *desc_src, *desc_tmp;
@@ -1227,6 +1235,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
 	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
 		hclge_dbg_dump_rst_info(hdev);
+	} else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
+		hclge_dbg_dump_serv_info(hdev);
 	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
 		hclge_dbg_get_m7_stats_info(hdev);
 	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
 
 static struct hnae3_ae_algo ae_algo;
 
+static struct workqueue_struct *hclge_wq;
+
 static const struct pci_device_id ae_algo_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
@@ -416,7 +418,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
 {
 #define HCLGE_MAC_CMD_NUM 21
 
-	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+	u64 *data = (u64 *)(&hdev->mac_stats);
 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
 	__le64 *desc_data;
 	int i, k, n;
@@ -453,7 +455,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
 
 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
 {
-	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+	u64 *data = (u64 *)(&hdev->mac_stats);
 	struct hclge_desc *desc;
 	__le64 *desc_data;
 	u16 i, k, n;
@@ -802,7 +804,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
 	struct hclge_dev *hdev = vport->back;
 	u64 *p;
 
-	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
 				 ARRAY_SIZE(g_mac_stats_string), data);
 	p = hclge_tqps_get_stats(handle, p);
 }
@@ -815,8 +817,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
 
 	hclge_update_stats(handle, NULL);
 
-	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
-	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
 }
 
 static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -2665,31 +2667,27 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
-	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
-		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
-			      &hdev->mbx_service_task);
+		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+				    hclge_wq, &hdev->service_task, 0);
 }
 
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
-		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
-			      &hdev->rst_service_task);
+		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+				    hclge_wq, &hdev->service_task, 0);
 }
 
 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
 {
-	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
-	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
-	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
-		hdev->hw_stats.stats_timer++;
-		hdev->fd_arfs_expire_timer++;
+	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-				    system_wq, &hdev->service_task,
+				    hclge_wq, &hdev->service_task,
 				    delay_time);
-	}
 }
 
 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2748,6 +2746,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 
 	if (!client)
 		return;
 
+	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+		return;
+
 	state = hclge_get_mac_phy_link(hdev);
 	if (state != hdev->hw.mac.link) {
 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -2761,6 +2763,8 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 		}
 		hdev->hw.mac.link = state;
 	}
+
+	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
 }
 
 static void hclge_update_port_capability(struct hclge_mac *mac)
@@ -3352,6 +3356,18 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
 	return 0;
 }
 
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	hclge_mbx_handler(hdev);
+
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
 {
 	struct hclge_pf_rst_sync_cmd *req;
@@ -3363,6 +3379,9 @@ static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
 
 	do {
+		/* vf need to down netdev by mbx during PF or FLR reset */
+		hclge_mailbox_service_task(hdev);
+
 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 		/* for compatible with old firmware, wait
 		 * 100 ms for VF to stop IO
@@ -3672,6 +3691,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
 
 	hclge_dbg_dump_rst_info(hdev);
 
+	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
 	return false;
 }
 
@@ -3825,6 +3846,7 @@ static void hclge_reset(struct hclge_dev *hdev)
 	hdev->rst_stats.reset_fail_cnt = 0;
 	hdev->rst_stats.reset_done_cnt++;
 	ae_dev->reset_type = HNAE3_NONE_RESET;
+	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
 
 	/* if default_reset_request has a higher level reset request,
 	 * it should be handled as soon as possible. since some errors
@@ -3939,36 +3961,19 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
 	hdev->reset_type = HNAE3_NONE_RESET;
 }
 
-static void hclge_reset_service_task(struct work_struct *work)
+static void hclge_reset_service_task(struct hclge_dev *hdev)
 {
-	struct hclge_dev *hdev =
-		container_of(work, struct hclge_dev, rst_service_task);
+	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+		return;
 
 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
 		return;
 
-	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
-
 	hclge_reset_subtask(hdev);
 
 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
 }
 
-static void hclge_mailbox_service_task(struct work_struct *work)
-{
-	struct hclge_dev *hdev =
-		container_of(work, struct hclge_dev, mbx_service_task);
-
-	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
-		return;
-
-	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
-	hclge_mbx_handler(hdev);
-
-	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
-}
-
 static void hclge_update_vport_alive(struct hclge_dev *hdev)
 {
 	int i;
@@ -3986,29 +3991,62 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
 	}
 }
 
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
+{
+	unsigned long delta = round_jiffies_relative(HZ);
+
+	/* Always handle the link updating to make sure link state is
+	 * updated when it is triggered by mbx.
+	 */
+	hclge_update_link_status(hdev);
+
+	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+		delta = jiffies - hdev->last_serv_processed;
+
+		if (delta < round_jiffies_relative(HZ)) {
+			delta = round_jiffies_relative(HZ) - delta;
+			goto out;
+		}
+	}
+
+	hdev->serv_processed_cnt++;
+	hclge_update_vport_alive(hdev);
+
+	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+		hdev->last_serv_processed = jiffies;
+		goto out;
+	}
+
+	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+		hclge_update_stats_for_all(hdev);
+
+	hclge_update_port_info(hdev);
+	hclge_sync_vlan_filter(hdev);
+
+	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
+		hclge_rfs_filter_expire(hdev);
+
+	hdev->last_serv_processed = jiffies;
+
+out:
+	hclge_task_schedule(hdev, delta);
+}
+
 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
 		container_of(work, struct hclge_dev, service_task.work);
 
-	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+	hclge_reset_service_task(hdev);
+	hclge_mailbox_service_task(hdev);
+	hclge_periodic_service_task(hdev);
 
-	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
-		hclge_update_stats_for_all(hdev);
-		hdev->hw_stats.stats_timer = 0;
-	}
-
-	hclge_update_port_info(hdev);
-	hclge_update_link_status(hdev);
-	hclge_update_vport_alive(hdev);
-	hclge_sync_vlan_filter(hdev);
-
-	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
-		hclge_rfs_filter_expire(hdev);
-		hdev->fd_arfs_expire_timer = 0;
-	}
-
-	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+	/* Handle reset and mbx again in case periodical task delays the
+	 * handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
+	 */
+	hclge_reset_service_task(hdev);
+	hclge_mailbox_service_task(hdev);
 }
 
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -6734,6 +6772,19 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
 	}
 }
 
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT	100000
+
+	unsigned long last = hdev->serv_processed_cnt;
+	int i = 0;
+
+	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+	       last == hdev->serv_processed_cnt)
+		usleep_range(1, 1);
+}
+
 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6742,12 +6793,12 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 	if (enable) {
 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
 	} else {
-		/* Set the DOWN flag here to disable the service to be
-		 * scheduled again
-		 */
+		/* Set the DOWN flag here to disable link updating */
 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
-		cancel_delayed_work_sync(&hdev->service_task);
-		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+
+		/* flush memory to make sure DOWN is seen by service task */
+		smp_mb__before_atomic();
+		hclge_flush_link_update(hdev);
 	}
 }
 
@@ -9256,6 +9307,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
@@ -9269,10 +9321,6 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 	del_timer_sync(&hdev->reset_timer);
 	if (hdev->service_task.work.func)
 		cancel_delayed_work_sync(&hdev->service_task);
-	if (hdev->rst_service_task.func)
-		cancel_work_sync(&hdev->rst_service_task);
-	if (hdev->mbx_service_task.func)
-		cancel_work_sync(&hdev->mbx_service_task);
 }
 
@@ -9477,8 +9525,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
-	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
-	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
 	/* Setup affinity after service timer setup because add_timer_on
 	 * is called in affinity notify.
@@ -9512,6 +9558,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);
 
+	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
 	return 0;
 
 err_mdiobus_unreg:
@@ -9534,7 +9582,7 @@ out:
 
 static void hclge_stats_clear(struct hclge_dev *hdev)
 {
-	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
 }
 
 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -10611,6 +10659,12 @@ static int hclge_init(void)
 {
 	pr_info("%s is initializing\n", HCLGE_NAME);
 
+	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+	if (!hclge_wq) {
+		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+		return -ENOMEM;
+	}
+
 	hnae3_register_ae_algo(&ae_algo);
 
 	return 0;
@@ -10619,6 +10673,7 @@ static int hclge_init(void)
 static void hclge_exit(void)
 {
 	hnae3_unregister_ae_algo(&ae_algo);
+	destroy_workqueue(hclge_wq);
 }
 module_init(hclge_init);
 module_exit(hclge_exit);
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -208,13 +208,14 @@ enum HCLGE_DEV_STATE {
 	HCLGE_STATE_NIC_REGISTERED,
 	HCLGE_STATE_ROCE_REGISTERED,
 	HCLGE_STATE_SERVICE_INITED,
 	HCLGE_STATE_SERVICE_SCHED,
 	HCLGE_STATE_RST_SERVICE_SCHED,
 	HCLGE_STATE_RST_HANDLING,
 	HCLGE_STATE_MBX_SERVICE_SCHED,
 	HCLGE_STATE_MBX_HANDLING,
 	HCLGE_STATE_STATISTICS_UPDATING,
 	HCLGE_STATE_CMD_DISABLE,
 	HCLGE_STATE_LINK_UPDATING,
+	HCLGE_STATE_RST_FAIL,
 	HCLGE_STATE_MAX
 };
 
@@ -454,11 +455,7 @@ struct hclge_mac_stats {
 	u64 mac_rx_ctrl_pkt_num;
 };
 
-#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
-struct hclge_hw_stats {
-	struct hclge_mac_stats mac_stats;
-	u32 stats_timer;
-};
+#define HCLGE_STATS_TIMER_INTERVAL	300UL
 
 struct hclge_vlan_type_cfg {
 	u16 rx_ot_fst_vlan_type;
@@ -549,7 +546,7 @@ struct key_info {
 
 /* assigned by firmware, the real filter number for each pf may be less */
 #define MAX_FD_FILTER_NUM	4096
-#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL	5
+#define HCLGE_ARFS_EXPIRE_INTERVAL	5UL
 
 enum HCLGE_FD_ACTIVE_RULE_TYPE {
 	HCLGE_FD_RULE_NONE,
@@ -712,7 +709,7 @@ struct hclge_dev {
 	struct hnae3_ae_dev *ae_dev;
 	struct hclge_hw hw;
 	struct hclge_misc_vector misc_vector;
-	struct hclge_hw_stats hw_stats;
+	struct hclge_mac_stats mac_stats;
 	unsigned long state;
 	unsigned long flr_state;
 	unsigned long last_reset_time;
@@ -774,8 +771,6 @@ struct hclge_dev {
 	unsigned long service_timer_previous;
 	struct timer_list reset_timer;
 	struct delayed_work service_task;
-	struct work_struct rst_service_task;
-	struct work_struct mbx_service_task;
 
 	bool cur_promisc;
 	int num_alloc_vfs;	/* Actual number of VFs allocated */
@@ -811,7 +806,8 @@ struct hclge_dev {
 	struct hlist_head fd_rule_list;
 	spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
 	u16 hclge_fd_rule_num;
-	u16 fd_arfs_expire_timer;
+	unsigned long serv_processed_cnt;
+	unsigned long last_serv_processed;
 	unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
 	enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
 	u8 fd_en;
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -635,7 +635,6 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
 #define LINK_STATUS_OFFSET	1
 #define LINK_FAIL_CODE_OFFSET	2
 
-	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 	hclge_task_schedule(hdev, 0);
 
 	if (!req->msg[LINK_STATUS_OFFSET])
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -16,6 +16,8 @@
 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
 static struct hnae3_ae_algo ae_algovf;
 
+static struct workqueue_struct *hclgevf_wq;
+
 static const struct pci_device_id ae_algovf_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
@@ -440,6 +442,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 	struct hnae3_client *rclient;
 	struct hnae3_client *client;
 
+	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+		return;
+
 	client = handle->client;
 	rclient = hdev->roce_client;
 
@@ -452,6 +457,8 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 			rclient->ops->link_status_change(rhandle, !!link_state);
 		hdev->hw.mac.link = link_state;
 	}
+
+	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
 }
 
 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
@@ -1591,6 +1598,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 		hclgevf_reset_task_schedule(hdev);
 	} else {
+		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 		hclgevf_dump_rst_info(hdev);
 	}
 }
@@ -1652,6 +1660,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 	ae_dev->reset_type = HNAE3_NONE_RESET;
 	hdev->rst_stats.rst_done_cnt++;
 	hdev->rst_stats.rst_fail_cnt = 0;
+	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
 	return ret;
 err_reset_lock:
@@ -1767,63 +1776,40 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
 
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
 {
-	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
-	    !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
-		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
-		schedule_work(&hdev->rst_service_task);
-	}
+	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+			      &hdev->state))
+		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
 }
 
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
 {
-	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
-	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
-		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
-		schedule_work(&hdev->mbx_service_task);
-	}
+	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+			      &hdev->state))
+		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
 }
 
-static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+				  unsigned long delay)
 {
-	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
-	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
-		schedule_work(&hdev->service_task);
-}
-
-static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
-{
-	/* if we have any pending mailbox event then schedule the mbx task */
-	if (hdev->mbx_event_pending)
-		hclgevf_mbx_task_schedule(hdev);
-
-	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
-		hclgevf_reset_task_schedule(hdev);
-}
-
-static void hclgevf_service_timer(struct timer_list *t)
-{
-	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
-
-	mod_timer(&hdev->service_timer, jiffies +
-		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
-
-	hdev->stats_timer++;
-	hclgevf_task_schedule(hdev);
+	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
 }
 
-static void hclgevf_reset_service_task(struct work_struct *work)
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 {
 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
 
-	struct hclgevf_dev *hdev =
-		container_of(work, struct hclgevf_dev, rst_service_task);
 	int ret;
 
+	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
+		return;
+
 	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
 		return;
 
-	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
-
 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
 			       &hdev->reset_state)) {
 		/* PF has initmated that it is about to reset the hardware.
@@ -1885,39 +1871,24 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
 }
 
-static void hclgevf_mailbox_service_task(struct work_struct *work)
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_dev *hdev;
-
-	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		return;
 
 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
 		return;
 
-	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
 	hclgevf_mbx_async_handler(hdev);
 
 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
 }
 
-static void hclgevf_keep_alive_timer(struct timer_list *t)
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
-
-	schedule_work(&hdev->keep_alive_task);
-	mod_timer(&hdev->keep_alive_timer, jiffies +
-		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-}
-
-static void hclgevf_keep_alive_task(struct work_struct *work)
-{
-	struct hclgevf_dev *hdev;
 	u8 respmsg;
 	int ret;
 
-	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
-
 	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
 		return;
 
@@ -1928,19 +1899,32 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
 			"VF sends keep alive cmd failed(=%d)\n", ret);
 }
 
-static void hclgevf_service_task(struct work_struct *work)
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
 {
-	struct hnae3_handle *handle;
-	struct hclgevf_dev *hdev;
+	unsigned long delta = round_jiffies_relative(HZ);
+	struct hnae3_handle *handle = &hdev->nic;
 
-	hdev = container_of(work, struct hclgevf_dev, service_task);
-	handle = &hdev->nic;
+	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+		delta = jiffies - hdev->last_serv_processed;
 
-	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
-		hclgevf_tqps_update_stats(handle);
-		hdev->stats_timer = 0;
+		if (delta < round_jiffies_relative(HZ)) {
+			delta = round_jiffies_relative(HZ) - delta;
+			goto out;
+		}
 	}
 
+	hdev->serv_processed_cnt++;
+	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+		hclgevf_keep_alive(hdev);
+
+	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+		hdev->last_serv_processed = jiffies;
+		goto out;
+	}
+
+	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+		hclgevf_tqps_update_stats(handle);
+
 	/* request the link status from the PF. PF would be able to tell VF
 	 * about such updates in future so we might remove this later
 	 */
@@ -1950,9 +1934,27 @@
 
 	hclgevf_sync_vlan_filter(hdev);
 
-	hclgevf_deferred_task_schedule(hdev);
+	hdev->last_serv_processed = jiffies;
 
-	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+out:
+	hclgevf_task_schedule(hdev, delta);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+						service_task.work);
+
+	hclgevf_reset_service_task(hdev);
+	hclgevf_mailbox_service_task(hdev);
+	hclgevf_periodic_service_task(hdev);
+
+	/* Handle reset and mbx again in case periodical task delays the
+	 * handling by calling hclgevf_task_schedule() in
+	 * hclgevf_periodic_service_task()
+	 */
+	hclgevf_reset_service_task(hdev);
+	hclgevf_mailbox_service_task(hdev);
 }
 
 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
@@ -2189,16 +2191,31 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 					false);
 }
 
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
+
+	unsigned long last = hdev->serv_processed_cnt;
+	int i = 0;
+
+	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+	       last == hdev->serv_processed_cnt)
+		usleep_range(1, 1);
+}
+
 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
 	if (enable) {
-		mod_timer(&hdev->service_timer, jiffies + HZ);
+		hclgevf_task_schedule(hdev, 0);
 	} else {
-		del_timer_sync(&hdev->service_timer);
-		cancel_work_sync(&hdev->service_task);
-		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+		/* flush memory to make sure DOWN is seen by service task */
+		smp_mb__before_atomic();
+		hclgevf_flush_link_update(hdev);
 	}
 }
 
@@ -2245,16 +2262,12 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
 
 static int hclgevf_client_start(struct hnae3_handle *handle)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int ret;
 
 	ret = hclgevf_set_alive(handle, true);
 	if (ret)
 		return ret;
 
-	mod_timer(&hdev->keep_alive_timer, jiffies +
-		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-
 	return 0;
 }
 
@@ -2267,25 +2280,15 @@ static void hclgevf_client_stop(struct hnae3_handle *handle)
 	if (ret)
 		dev_warn(&hdev->pdev->dev,
 			 "%s failed %d\n", __func__, ret);
-
-	del_timer_sync(&hdev->keep_alive_timer);
-	cancel_work_sync(&hdev->keep_alive_task);
 }
 
 static void hclgevf_state_init(struct hclgevf_dev *hdev)
 {
-	/* setup tasks for the MBX */
-	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
-	/* setup tasks for service timer */
-	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
-
-	INIT_WORK(&hdev->service_task, hclgevf_service_task);
-	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
-
-	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
+	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
 
 	mutex_init(&hdev->mbx_resp.mbx_mutex);
 
@@ -2298,18 +2301,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
 
-	if (hdev->keep_alive_timer.function)
-		del_timer_sync(&hdev->keep_alive_timer);
-	if (hdev->keep_alive_task.func)
-		cancel_work_sync(&hdev->keep_alive_task);
-	if (hdev->service_timer.function)
-		del_timer_sync(&hdev->service_timer);
-	if (hdev->service_task.func)
-		cancel_work_sync(&hdev->service_task);
-	if (hdev->mbx_service_task.func)
-		cancel_work_sync(&hdev->mbx_service_task);
-	if (hdev->rst_service_task.func)
-		cancel_work_sync(&hdev->rst_service_task);
+	if (hdev->service_task.work.func)
+		cancel_delayed_work_sync(&hdev->service_task);
 
 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
 }
@@ -2807,6 +2800,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
 		 HCLGEVF_DRIVER_NAME);
 
+	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+
 	return 0;
 
 err_config:
@@ -2838,7 +2833,6 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
-	struct hclgevf_dev *hdev;
 	int ret;
 
 	ret = hclgevf_alloc_hdev(ae_dev);
@@ -2853,10 +2847,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	hdev = ae_dev->priv;
-	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
-	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
-
 	return 0;
 }
 
@@ -3213,6 +3203,12 @@ static int hclgevf_init(void)
 {
 	pr_info("%s is initializing\n", HCLGEVF_NAME);
 
+	hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+	if (!hclgevf_wq) {
+		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+		return -ENOMEM;
+	}
+
 	hnae3_register_ae_algo(&ae_algovf);
 
 	return 0;
@@ -3221,6 +3217,7 @@ static int hclgevf_init(void)
 static void hclgevf_exit(void)
 {
 	hnae3_unregister_ae_algo(&ae_algovf);
+	destroy_workqueue(hclgevf_wq);
 }
 module_init(hclgevf_init);
 module_exit(hclgevf_exit);
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -142,12 +142,13 @@ enum hclgevf_states {
 	HCLGEVF_STATE_REMOVING,
 	HCLGEVF_STATE_NIC_REGISTERED,
 	/* task states */
 	HCLGEVF_STATE_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_SERVICE_SCHED,
 	HCLGEVF_STATE_RST_HANDLING,
 	HCLGEVF_STATE_MBX_SERVICE_SCHED,
 	HCLGEVF_STATE_MBX_HANDLING,
 	HCLGEVF_STATE_CMD_DISABLE,
 	HCLGEVF_STATE_LINK_UPDATING,
+	HCLGEVF_STATE_RST_FAIL,
 };
 
 struct hclgevf_mac {
@@ -283,12 +284,7 @@ struct hclgevf_dev {
 	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
-	struct timer_list service_timer;
-	struct timer_list keep_alive_timer;
-	struct work_struct service_task;
-	struct work_struct keep_alive_task;
-	struct work_struct rst_service_task;
-	struct work_struct mbx_service_task;
+	struct delayed_work service_task;
 
 	struct hclgevf_tqp *htqp;
 
@@ -298,7 +294,8 @@ struct hclgevf_dev {
 	struct hnae3_client *nic_client;
 	struct hnae3_client *roce_client;
 	u32 flag;
-	u32 stats_timer;
+	unsigned long serv_processed_cnt;
+	unsigned long last_serv_processed;
 };
 
 static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)