net/mlx5: Handle sync reset unload event

Added a new event handler to firmware sync reset, used to support the
firmware sync reset flow on smart NICs. Adding this new stage to the
flow enables the firmware to ensure that host PFs unload before the
ECPFs unload, which avoids a race between the PFs' recovery flows.

If the firmware sends the sync_reset_unload event to the driver, the
driver should unload and close all HW resources of the function. Once
the driver has finished the unload part it can no longer receive events
from the firmware, as the event queues are closed, so it polls the reset
state field in the initialization segment to know when to continue to
the next stage of the sync reset flow.
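
In outline, that polling stage looks like the condensed sketch below.
This is an illustration only, not part of the patch: it reuses the
helpers the patch introduces (mlx5_get_fw_rst_state() and the
MLX5_FW_RST_STATE_* values), and poll_fw_rst_state() itself is a
hypothetical wrapper; the real loop lives in
mlx5_sync_reset_unload_event() in the diff below.

  static int poll_fw_rst_state(struct mlx5_core_dev *dev)
  {
  	unsigned long timeout = jiffies +
  				msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
  	u8 rst_state;

  	do {
  		/* Event queues are gone by now, so the 4-bit reset state
  		 * field in the initialization segment is the only channel
  		 * left for the firmware to signal the next step.
  		 */
  		rst_state = mlx5_get_fw_rst_state(dev);
  		if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
  		    rst_state == MLX5_FW_RST_STATE_IDLE)
  			return rst_state; /* reset action requested */
  		msleep(20);
  	} while (!time_after(jiffies, timeout));

  	return -ETIMEDOUT; /* no reset action within the RESET_UNLOAD budget */
  }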

Added a capability bit (pci_sync_for_fw_update_with_driver_unload) to
advertise support for the sync_reset_unload event.
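
The way the capability is consumed can be sketched as follows; this
mirrors the mlx5_fw_reset_wait_reset_done() change in the diff below,
but sync_reset_timeout_ms() is a hypothetical helper used here only for
illustration.

  static unsigned long sync_reset_timeout_ms(struct mlx5_core_dev *dev)
  {
  	unsigned long timeout_ms = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);

  	/* Budget extra time for the unload + poll stage when the
  	 * device advertises the new capability.
  	 */
  	if (MLX5_CAP_GEN(dev, pci_sync_for_fw_update_with_driver_unload))
  		timeout_ms += mlx5_tout_ms(dev, RESET_UNLOAD);

  	return timeout_ms;
  }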

Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>

--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c

@@ -21,6 +21,7 @@ struct mlx5_fw_reset {
 	struct workqueue_struct *wq;
 	struct work_struct fw_live_patch_work;
 	struct work_struct reset_request_work;
+	struct work_struct reset_unload_work;
 	struct work_struct reset_reload_work;
 	struct work_struct reset_now_work;
 	struct work_struct reset_abort_work;
@@ -30,6 +31,26 @@ struct mlx5_fw_reset {
 	int ret;
 };
 
+enum {
+	MLX5_FW_RST_STATE_IDLE = 0,
+	MLX5_FW_RST_STATE_TOGGLE_REQ = 4,
+};
+
+enum {
+	MLX5_RST_STATE_BIT_NUM = 12,
+	MLX5_RST_ACK_BIT_NUM = 22,
+};
+
+static u8 mlx5_get_fw_rst_state(struct mlx5_core_dev *dev)
+{
+	return (ioread32be(&dev->iseg->initializing) >> MLX5_RST_STATE_BIT_NUM) & 0xF;
+}
+
+static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
+{
+	iowrite32be(BIT(MLX5_RST_ACK_BIT_NUM), &dev->iseg->initializing);
+}
+
 static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
 						     struct devlink_param_gset_ctx *ctx)
 {
@@ -155,7 +176,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
 	return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
 }
 
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
@@ -163,7 +184,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
 		complete(&fw_reset->done);
 	} else {
-		mlx5_unload_one(dev, false);
+		if (!unloaded)
+			mlx5_unload_one(dev, false);
 		if (mlx5_health_wait_pci_up(dev))
 			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
 		else
@@ -204,7 +226,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
 
 	mlx5_sync_reset_clear_reset_requested(dev, false);
 	mlx5_enter_error_state(dev, true);
-	mlx5_fw_reset_complete_reload(dev);
+	mlx5_fw_reset_complete_reload(dev, false);
 }
 
 #define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -458,7 +480,70 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
 	mlx5_enter_error_state(dev, true);
 done:
 	fw_reset->ret = err;
-	mlx5_fw_reset_complete_reload(dev);
+	mlx5_fw_reset_complete_reload(dev, false);
+}
+
+static void mlx5_sync_reset_unload_event(struct work_struct *work)
+{
+	struct mlx5_fw_reset *fw_reset;
+	struct mlx5_core_dev *dev;
+	unsigned long timeout;
+	bool reset_action;
+	u8 rst_state;
+	int err;
+
+	fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
+	dev = fw_reset->dev;
+	if (mlx5_sync_reset_clear_reset_requested(dev, false))
+		return;
+
+	mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
+
+	err = mlx5_cmd_fast_teardown_hca(dev);
+	if (err)
+		mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err);
+	else
+		mlx5_enter_error_state(dev, true);
+
+	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
+		mlx5_unload_one_devl_locked(dev, false);
+	else
+		mlx5_unload_one(dev, false);
+
+	mlx5_set_fw_rst_ack(dev);
+	mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
+	reset_action = false;
+	timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+	do {
+		rst_state = mlx5_get_fw_rst_state(dev);
+		if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+		    rst_state == MLX5_FW_RST_STATE_IDLE) {
+			reset_action = true;
+			break;
+		}
+		msleep(20);
+	} while (!time_after(jiffies, timeout));
+
+	if (!reset_action) {
+		mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+			      rst_state);
+		fw_reset->ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
+	if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+		err = mlx5_pci_link_toggle(dev);
+		if (err) {
+			mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+			fw_reset->ret = err;
+		}
+	}
+
+done:
+	mlx5_fw_reset_complete_reload(dev, true);
 }
 
 static void mlx5_sync_reset_abort_event(struct work_struct *work)
@@ -483,6 +568,9 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct mlx5_eqe *eqe)
 	case MLX5_SYNC_RST_STATE_RESET_REQUEST:
 		queue_work(fw_reset->wq, &fw_reset->reset_request_work);
 		break;
+	case MLX5_SYNC_RST_STATE_RESET_UNLOAD:
+		queue_work(fw_reset->wq, &fw_reset->reset_unload_work);
+		break;
 	case MLX5_SYNC_RST_STATE_RESET_NOW:
 		queue_work(fw_reset->wq, &fw_reset->reset_now_work);
 		break;
@@ -517,10 +605,13 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
 {
 	unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
-	unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+	unsigned long timeout;
 	int err;
 
+	if (MLX5_CAP_GEN(dev, pci_sync_for_fw_update_with_driver_unload))
+		pci_sync_update_timeout += mlx5_tout_ms(dev, RESET_UNLOAD);
+	timeout = msecs_to_jiffies(pci_sync_update_timeout);
 	if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
 		mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
 			       pci_sync_update_timeout / 1000);
@@ -557,6 +648,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
 	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
 	cancel_work_sync(&fw_reset->fw_live_patch_work);
 	cancel_work_sync(&fw_reset->reset_request_work);
+	cancel_work_sync(&fw_reset->reset_unload_work);
 	cancel_work_sync(&fw_reset->reset_reload_work);
 	cancel_work_sync(&fw_reset->reset_now_work);
 	cancel_work_sync(&fw_reset->reset_abort_work);
@@ -595,6 +687,7 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 
 	INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
 	INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
+	INIT_WORK(&fw_reset->reset_unload_work, mlx5_sync_reset_unload_event);
 	INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
 	INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
 	INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);

--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 
 	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
 		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
+	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
+		MLX5_SET(cmd_hca_cap, set_hca_cap,
+			 pci_sync_for_fw_update_with_driver_unload, 1);
 
 	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
 		MLX5_SET(cmd_hca_cap,
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h

@@ -716,6 +716,7 @@ enum sync_rst_state_type {
 	MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
 	MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
 	MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+	MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
 };
 
 struct mlx5_eqe_sync_fw_update {

--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h

@@ -1755,7 +1755,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_328[0x2];
 	u8 relaxed_ordering_read[0x1];
 	u8 log_max_pd[0x5];
-	u8 reserved_at_330[0x7];
+	u8 reserved_at_330[0x6];
+	u8 pci_sync_for_fw_update_with_driver_unload[0x1];
 	u8 vnic_env_cnt_steering_fail[0x1];
 	u8 reserved_at_338[0x1];
 	u8 q_counter_aggregation[0x1];