scsi: ufs: Fix concurrency of error handler and other error recovery paths
Error recovery can be invoked from multiple code paths, including hibern8
enter/exit (from ufshcd_link_recovery), ufshcd_eh_host_reset_handler() and
eh_work scheduled from IRQ context. Ultimately, these paths are all trying
to invoke ufshcd_reset_and_restore() in either a synchronous or
asynchronous manner. This causes problems:
- If link recovery happens during ungate work, ufshcd_hold() would be
called recursively. Although commit 53c12d0ef6 ("scsi: ufs: fix error
recovery after the hibern8 exit failure") fixed a deadlock due to
recursive calls of ufshcd_hold() by adding a check of eh_in_progress
into ufshcd_hold(), this check allows eh_work to run in parallel while
link recovery is running.
- Similar concurrency can also happen when error recovery is invoked from
ufshcd_eh_host_reset_handler and ufshcd_link_recovery.
- Concurrency can even happen between eh_works. eh_work, currently queued
on system_wq, is allowed to have multiple instances running in parallel,
but we don't have proper protection for that.
If any of the above concurrency scenarios occurs, error recovery would fail,
leaving the UFS device and host controller in a bad state. To fix the
concurrency problem, this change queues eh_work on a single-threaded
workqueue and removes the link recovery calls from the hibern8 enter/exit
path. In addition, ufshcd_eh_host_reset_handler() now makes use of eh_work
instead of calling ufshcd_reset_and_restore() directly. This unifies the UFS
error recovery mechanism.
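To make the serialization property concrete, here is a minimal, self-contained
sketch of the pattern (not the ufshcd code itself; the demo_* names and the
trivial handler are made up for illustration). A single-threaded workqueue runs
at most one work item at a time, and queue_work() is a no-op returning false
while the item is still pending, so repeated scheduling requests coalesce into
a single recovery pass:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_eh_wq;
static struct work_struct demo_eh_work;

static void demo_eh_handler(struct work_struct *work)
{
	/* recovery would run here; never concurrently with itself */
	pr_info("demo: error recovery pass\n");
}

static int __init demo_init(void)
{
	/* one dedicated worker thread => work instances serialize */
	demo_eh_wq = create_singlethread_workqueue("demo_eh_wq");
	if (!demo_eh_wq)
		return -ENOMEM;
	INIT_WORK(&demo_eh_work, demo_eh_handler);
	/* queuing twice before the handler runs yields one pass */
	queue_work(demo_eh_wq, &demo_eh_work);
	queue_work(demo_eh_wq, &demo_eh_work); /* no-op if still pending */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_eh_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");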
According to the UFSHCI JEDEC spec, a hibern8 enter/exit error occurs when
the link is broken. This essentially applies to any power mode change
operation, since they all use PACP_PWR commands in the UniPro layer. So, if
a power mode change operation (including AH8 enter/exit) fails, mark the
link state as UIC_LINK_BROKEN_STATE and schedule eh_work. In this case, the
error handler needs to do a full reset and restore to recover the link back
to the active state. Until the link state is recovered to active,
ufshcd_uic_pwr_ctrl() simply returns -ENOLINK to avoid piling up more errors.
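The resulting state machine around the broken link can be sketched in plain C.
This is a simplified model under assumed names (demo_hba, demo_pwr_ctrl), not
the ufshcd implementation, which additionally holds the host lock and kicks the
error handler via ufshcd_schedule_eh_work():

#include <errno.h>
#include <stdio.h>

enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0,
	UIC_LINK_ACTIVE_STATE	= 1,
	UIC_LINK_HIBERN8_STATE	= 2,
	UIC_LINK_BROKEN_STATE	= 3,	/* added by this patch */
};

struct demo_hba {
	enum uic_link_state uic_link_state;
};

/* mirrors the ufshcd_is_link_broken()/ufshcd_set_link_broken() macros */
static int demo_pwr_ctrl(struct demo_hba *hba, int cmd_fails)
{
	if (hba->uic_link_state == UIC_LINK_BROKEN_STATE)
		return -ENOLINK;	/* fail fast until the link is recovered */
	if (cmd_fails) {
		/* a failed PACP_PWR command means the link is broken */
		hba->uic_link_state = UIC_LINK_BROKEN_STATE;
		/* real driver: ufshcd_schedule_eh_work(hba) runs here */
		return -EIO;
	}
	return 0;
}

int main(void)
{
	struct demo_hba hba = { .uic_link_state = UIC_LINK_ACTIVE_STATE };

	demo_pwr_ctrl(&hba, 1);	/* power mode change fails, link marked broken */
	printf("retry before recovery: %d\n", demo_pwr_ctrl(&hba, 0)); /* -ENOLINK */
	return 0;
}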
Link: https://lore.kernel.org/r/1596975355-39813-6-git-send-email-cang@codeaurora.org
Reviewed-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3f8af60447
commit 4db7a23605
@@ -16,6 +16,7 @@ static const char *ufschd_uic_link_state_to_string(
 	case UIC_LINK_OFF_STATE:	return "OFF";
 	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
 	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
+	case UIC_LINK_BROKEN_STATE:	return "BROKEN";
 	default:			return "UNKNOWN";
 	}
 }
@@ -228,6 +228,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
@@ -1571,11 +1572,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->clk_gating.active_reqs++;
 
-	if (ufshcd_eh_in_progress(hba)) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return 0;
-	}
-
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
@@ -1653,6 +1649,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	/*
@@ -1679,8 +1676,11 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
-		if (ufshcd_uic_hibern8_enter(hba)) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret) {
 			hba->clk_gating.state = CLKS_ON;
+			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+				__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
 			goto out;
@@ -1725,11 +1725,10 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 	hba->clk_gating.active_reqs--;
 
-	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
-	    || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-	    || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
-	    || hba->active_uic_cmd || hba->uic_async_done
-	    || ufshcd_eh_in_progress(hba))
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+	    ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
+	    hba->active_uic_cmd || hba->uic_async_done)
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
@@ -3750,6 +3749,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	ufshcd_add_delay_before_dme_cmd(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ufshcd_is_link_broken(hba)) {
+		ret = -ENOLINK;
+		goto out_unlock;
+	}
 	hba->uic_async_done = &uic_async_done;
 	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
 		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
@@ -3797,6 +3800,11 @@ out:
 	hba->uic_async_done = NULL;
 	if (reenable_intr)
 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	if (ret) {
+		ufshcd_set_link_broken(hba);
+		ufshcd_schedule_eh_work(hba);
+	}
+out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
 
@@ -3866,7 +3874,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
 
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
 	int ret;
 	struct uic_command uic_cmd = {0};
@@ -3879,45 +3887,16 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
 			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
-	if (ret) {
-		int err;
-
-		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
-			__func__, ret);
-
-		/*
-		 * If link recovery fails then return error code returned from
-		 * ufshcd_link_recovery().
-		 * If link recovery succeeds then return -EAGAIN to attempt
-		 * hibern8 enter retry again.
-		 */
-		err = ufshcd_link_recovery(hba);
-		if (err) {
-			dev_err(hba->dev, "%s: link recovery failed", __func__);
-			ret = err;
-		} else {
-			ret = -EAGAIN;
-		}
-	} else
+	if (ret)
+		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+			__func__, ret);
+	else
 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
 								POST_CHANGE);
 
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
-	int ret = 0, retries;
-
-	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
-		ret = __ufshcd_uic_hibern8_enter(hba);
-		if (!ret)
-			goto out;
-	}
-out:
-	return ret;
-}
-
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {0};
@@ -3934,7 +3913,6 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 	if (ret) {
 		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
 			__func__, ret);
-		ret = ufshcd_link_recovery(hba);
 	} else {
 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
 							POST_CHANGE);
@@ -5557,6 +5535,24 @@ out:
 	return err_handling;
 }
 
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+	/* handle fatal errors only when link is not in error state */
+	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+		hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+		if (queue_work(hba->eh_wq, &hba->eh_work))
+			ufshcd_scsi_block_requests(hba);
+	}
+}
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
@@ -5573,15 +5569,23 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+	    (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
+	       ufshcd_is_link_broken(hba)))) {
+		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		ufshcd_scsi_unblock_requests(hba);
+		return;
+	}
+	ufshcd_set_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		goto out;
-
 	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	ufshcd_set_eh_in_progress(hba);
 
 	/* Complete requests that have door-bell cleared by h/w */
 	ufshcd_complete_requests(hba);
@@ -5593,15 +5597,15 @@ static void ufshcd_err_handler(struct work_struct *work)
 		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
 		ret = ufshcd_quirk_dl_nac_errors(hba);
 		spin_lock_irqsave(hba->host->host_lock, flags);
-		if (!ret)
+		if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
 			goto skip_err_handling;
 	}
-	if ((hba->saved_err & INT_FATAL_ERRORS) ||
-	    (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
+
+	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+	    ufshcd_is_saved_err_fatal(hba) ||
 	    ((hba->saved_err & UIC_ERROR) &&
-	     (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
-				    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
-				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
 		needs_reset = true;
 
 	/*
||||||
@ -5655,34 +5659,25 @@ skip_pending_xfer_clear:
|
|||||||
__ufshcd_transfer_req_compl(hba,
|
__ufshcd_transfer_req_compl(hba,
|
||||||
(1UL << (hba->nutrs - 1)));
|
(1UL << (hba->nutrs - 1)));
|
||||||
|
|
||||||
|
hba->force_reset = false;
|
||||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||||
err = ufshcd_reset_and_restore(hba);
|
err = ufshcd_reset_and_restore(hba);
|
||||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||||
if (err) {
|
if (err)
|
||||||
dev_err(hba->dev, "%s: reset and restore failed\n",
|
dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
|
||||||
__func__);
|
__func__, err);
|
||||||
hba->ufshcd_state = UFSHCD_STATE_ERROR;
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
* Inform scsi mid-layer that we did reset and allow to handle
|
|
||||||
* Unit Attention properly.
|
|
||||||
*/
|
|
||||||
scsi_report_bus_reset(hba->host, 0);
|
|
||||||
hba->saved_err = 0;
|
|
||||||
hba->saved_uic_err = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
skip_err_handling:
|
skip_err_handling:
|
||||||
if (!needs_reset) {
|
if (!needs_reset) {
|
||||||
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
|
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
|
||||||
|
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
|
||||||
if (hba->saved_err || hba->saved_uic_err)
|
if (hba->saved_err || hba->saved_uic_err)
|
||||||
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
|
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
|
||||||
__func__, hba->saved_err, hba->saved_uic_err);
|
__func__, hba->saved_err, hba->saved_uic_err);
|
||||||
}
|
}
|
||||||
|
|
||||||
ufshcd_clear_eh_in_progress(hba);
|
ufshcd_clear_eh_in_progress(hba);
|
||||||
|
|
||||||
out:
|
|
||||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||||
ufshcd_scsi_unblock_requests(hba);
|
ufshcd_scsi_unblock_requests(hba);
|
||||||
ufshcd_release(hba);
|
ufshcd_release(hba);
|
||||||
@@ -5816,6 +5811,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 			hba->errors, ufshcd_get_upmcrs(hba));
 		ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
 				       hba->errors);
+		ufshcd_set_link_broken(hba);
 		queue_eh_work = true;
 	}
 
@@ -5827,30 +5823,22 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 		hba->saved_err |= hba->errors;
 		hba->saved_uic_err |= hba->uic_error;
 
-		/* handle fatal errors only when link is functional */
-		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
-			/* block commands from scsi mid-layer */
-			ufshcd_scsi_block_requests(hba);
-
-			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
-
 		/* dump controller state before resetting */
 		if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
 			bool pr_prdt = !!(hba->saved_err &
 					SYSTEM_BUS_FATAL_ERROR);
 
 			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
 					__func__, hba->saved_err,
 					hba->saved_uic_err);
 
 			ufshcd_print_host_regs(hba);
 			ufshcd_print_pwr_info(hba);
 			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 			ufshcd_print_trs(hba, hba->outstanding_reqs,
 						pr_prdt);
-			}
-			schedule_work(&hba->eh_work);
 		}
+		ufshcd_schedule_eh_work(hba);
 		retval |= IRQ_HANDLED;
 	}
 	/*
@@ -6595,8 +6583,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	/* Establish the link again and restore the device */
 	err = ufshcd_probe_hba(hba, false);
 
-	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
-		err = -EIO;
 out:
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6615,9 +6601,23 @@ out:
  */
 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 {
+	u32 saved_err;
+	u32 saved_uic_err;
 	int err = 0;
+	unsigned long flags;
 	int retries = MAX_HOST_RESET_RETRIES;
 
+	/*
+	 * This is a fresh start, cache and clear saved error first,
+	 * in case new error generated during reset and restore.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	saved_err = hba->saved_err;
+	saved_uic_err = hba->saved_uic_err;
+	hba->saved_err = 0;
+	hba->saved_uic_err = 0;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	do {
 		/* Reset the attached device */
 		ufshcd_vops_device_reset(hba);
@@ -6625,6 +6625,18 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 		err = ufshcd_host_reset_and_restore(hba);
 	} while (err && --retries);
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * Inform scsi mid-layer that we did reset and allow to handle
+	 * Unit Attention properly.
+	 */
+	scsi_report_bus_reset(hba->host, 0);
+	if (err) {
+		hba->saved_err |= saved_err;
+		hba->saved_uic_err |= saved_uic_err;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return err;
 }
 
@@ -6636,48 +6648,25 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
  */
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int err;
+	int err = SUCCESS;
 	unsigned long flags;
 	struct ufs_hba *hba;
 
 	hba = shost_priv(cmd->device->host);
 
-	ufshcd_hold(hba, false);
-	/*
-	 * Check if there is any race with fatal error handling.
-	 * If so, wait for it to complete. Even though fatal error
-	 * handling does reset and restore in some cases, don't assume
-	 * anything out of it. We are just avoiding race here.
-	 */
-	do {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		if (!(work_pending(&hba->eh_work) ||
-			    hba->ufshcd_state == UFSHCD_STATE_RESET ||
-			    hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
-			break;
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
-		flush_work(&hba->eh_work);
-	} while (1);
-
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	ufshcd_set_eh_in_progress(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->force_reset = true;
+	ufshcd_schedule_eh_work(hba);
+	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	err = ufshcd_reset_and_restore(hba);
+	flush_work(&hba->eh_work);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!err) {
-		err = SUCCESS;
-		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-	} else {
+	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
 		err = FAILED;
-		hba->ufshcd_state = UFSHCD_STATE_ERROR;
-	}
-	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_release(hba);
 	return err;
 }
 
@@ -7398,6 +7387,7 @@ out:
 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 {
 	int ret;
+	unsigned long flags;
 	ktime_t start = ktime_get();
 
 	ret = ufshcd_link_startup(hba);
@@ -7462,14 +7452,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 	 */
 	ufshcd_set_active_icc_lvl(hba);
 
-	/* set the state as operational after switching to desired gear */
-	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
 	ufshcd_wb_config(hba);
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
 
 out:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ret)
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -8076,10 +8069,13 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 
 	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
 		ret = ufshcd_uic_hibern8_enter(hba);
-		if (!ret)
+		if (!ret) {
 			ufshcd_set_link_hibern8(hba);
-		else
+		} else {
+			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+					__func__, ret);
 			goto out;
+		}
 	}
 	/*
 	 * If autobkops is enabled, link can't be turned off because
@@ -8095,8 +8091,11 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 		 * unipro. But putting the link in hibern8 is much faster.
 		 */
 		ret = ufshcd_uic_hibern8_enter(hba);
-		if (ret)
+		if (ret) {
+			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+					__func__, ret);
 			goto out;
+		}
 		/*
 		 * Change controller state to "reset state" which
 		 * should also put the link in off/reset state
@@ -8416,10 +8415,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
-		if (!ret)
+		if (!ret) {
 			ufshcd_set_link_active(hba);
-		else
+		} else {
+			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+					__func__, ret);
 			goto vendor_suspend;
+		}
 	} else if (ufshcd_is_link_off(hba)) {
 		/*
 		 * A full initialization of the host and the device is
@@ -8793,6 +8795,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	int err;
 	struct Scsi_Host *host = hba->host;
 	struct device *dev = hba->dev;
+	char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
 	if (!mmio_base) {
 		dev_err(hba->dev,
@@ -8854,6 +8857,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	hba->max_pwr_info.is_valid = false;
 
 	/* Initialize work queues */
+	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+		 hba->host->host_no);
+	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+	if (!hba->eh_wq) {
+		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+			__func__);
+		err = -ENOMEM;
+		goto out_disable;
+	}
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
@@ -90,6 +90,7 @@ enum uic_link_state {
 	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
 	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
 	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
+	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
 };
 
 #define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
@@ -97,11 +98,15 @@ enum uic_link_state {
 				    UIC_LINK_ACTIVE_STATE)
 #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
 				    UIC_LINK_HIBERN8_STATE)
+#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
+				   UIC_LINK_BROKEN_STATE)
 #define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
 #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
 				    UIC_LINK_ACTIVE_STATE)
 #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
 				    UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
+				    UIC_LINK_BROKEN_STATE)
 
 #define ufshcd_set_ufs_dev_active(h) \
 	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
@@ -616,12 +621,14 @@ struct ufs_hba_variant_params {
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
+ * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
+ * @force_reset: flag to force eh_work perform a full reset
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
@@ -710,6 +717,7 @@ struct ufs_hba {
 	bool is_powered;
 
 	/* Work Queues */
+	struct workqueue_struct *eh_wq;
 	struct work_struct eh_work;
 	struct work_struct eeh_work;
 
|
|||||||
u32 saved_err;
|
u32 saved_err;
|
||||||
u32 saved_uic_err;
|
u32 saved_uic_err;
|
||||||
struct ufs_stats ufs_stats;
|
struct ufs_stats ufs_stats;
|
||||||
|
bool force_reset;
|
||||||
bool silence_err_logs;
|
bool silence_err_logs;
|
||||||
|
|
||||||
/* Device management request data */
|
/* Device management request data */
|
||||||
|