linux/drivers/scsi/ufs/ufs-debugfs.c
Asutosh Das b294ff3e34 scsi: ufs: core: Enable power management for wlun
During runtime suspend of the UFS host, the SCSI devices are already
suspended, and so are the request queues associated with them. However, the
UFS host still sends an SSU (START STOP UNIT) command to the device wlun
during runtime suspend.

While that command is issued, blk_queue_enter() checks whether the queue is
suspended; if it is, it waits for the queue to resume and never comes out of
the wait. Commit 52abca64fd ("scsi: block: Do not accept any requests while
suspended") added that suspended-state check to blk_queue_enter().
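
A rough sketch of the path that hangs (illustrative only, not the exact
kernel code; the helper name and the exact scsi_execute_req() arguments are
approximations). ufshcd_set_dev_pwr_mode() issues START STOP UNIT through
the device wlun's request queue, which is already runtime suspended at that
point, so blk_queue_enter() blocks forever; the call trace below shows the
real stack:

#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>
#include <linux/dma-direction.h>
#include "ufshcd.h"

/* Illustrative only: how the SSU command ends up in blk_queue_enter(). */
static int ssu_sketch(struct scsi_device *wlun_sdev,
		      enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;

	cmd[4] = pwr_mode << 4;
	/*
	 * scsi_execute_req() -> __scsi_execute() -> blk_get_request()
	 * -> blk_queue_enter(); with the wlun's queue runtime suspended,
	 * blk_queue_enter() waits for a resume that never happens.
	 */
	return scsi_execute_req(wlun_sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				HZ, 0, NULL);
}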

Call trace:
 __switch_to+0x174/0x2c4
 __schedule+0x478/0x764
 schedule+0x9c/0xe0
 blk_queue_enter+0x158/0x228
 blk_mq_alloc_request+0x40/0xa4
 blk_get_request+0x2c/0x70
 __scsi_execute+0x60/0x1c4
 ufshcd_set_dev_pwr_mode+0x124/0x1e4
 ufshcd_suspend+0x208/0x83c
 ufshcd_runtime_suspend+0x40/0x154
 ufshcd_pltfrm_runtime_suspend+0x14/0x20
 pm_generic_runtime_suspend+0x28/0x3c
 __rpm_callback+0x80/0x2a4
 rpm_suspend+0x308/0x614
 rpm_idle+0x158/0x228
 pm_runtime_work+0x84/0xac
 process_one_work+0x1f0/0x470
 worker_thread+0x26c/0x4c8
 kthread+0x13c/0x320
 ret_from_fork+0x10/0x18

Fix this by registering the UFS device wlun as a SCSI driver and registering
it for block-layer runtime PM, and by making it a supplier for all other
LUNs. This way the wlun device suspends after all of its consumers and
resumes after the HBA resumes. This also registers a new SCSI driver for the
RPMB wlun; that driver is mostly used to clear the RPMB wlun's unit
attention condition (UAC).
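
The supplier/consumer relationship is built on the driver core's device
links. A minimal sketch of that idea (the helper name is hypothetical; the
actual patch wires this up in ufshcd when LUNs are probed):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Hypothetical helper: make the device wlun the runtime PM supplier of a
 * regular LUN, so the LUN suspends before the wlun and resumes after it.
 */
static int ufs_link_lun_to_wlun(struct device *lun_dev, struct device *wlun_dev)
{
	struct device_link *link;

	link = device_link_add(lun_dev, wlun_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;

	return 0;
}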

[mkp: resolve merge conflict with 5.13-rc1 and fix doc warning]

Fixed smatch warnings:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

Link: https://lore.kernel.org/r/4662c462e79e3e7f541f54f88f8993f421026d83.1619223249.git.asutoshd@codeaurora.org
Reviewed-by: Adrian Hunter <adrian.hunter@intel.com>
Co-developed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2021-05-10 22:28:20 -04:00

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 Intel Corporation

#include <linux/debugfs.h>

#include "ufs-debugfs.h"
#include "ufshcd.h"

static struct dentry *ufs_debugfs_root;

void __init ufs_debugfs_init(void)
{
	ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}

void ufs_debugfs_exit(void)
{
	debugfs_remove_recursive(ufs_debugfs_root);
}
static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
{
	struct ufs_hba *hba = s->private;
	struct ufs_event_hist *e = hba->ufs_stats.event;

#define PRT(fmt, typ) \
	seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt)
	PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR);
	PRT("Data Link Layer errors: %llu\n", DL_ERR);
	PRT("Network Layer errors: %llu\n", NL_ERR);
	PRT("Transport Layer errors: %llu\n", TL_ERR);
	PRT("Generic DME errors: %llu\n", DME_ERR);
	PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR);
	PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR);
	PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL);
	PRT("PM Resume errors: %llu\n", RESUME_ERR);
	PRT("PM Suspend errors : %llu\n", SUSPEND_ERR);
	PRT("Logical Unit Resets: %llu\n", DEV_RESET);
	PRT("Host Resets: %llu\n", HOST_RESET);
	PRT("SCSI command aborts: %llu\n", ABORT);
#undef PRT
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
static int ee_usr_mask_get(void *data, u64 *val)
{
	struct ufs_hba *hba = data;

	*val = hba->ee_usr_mask;
	return 0;
}

/*
 * Take host_sem and a runtime PM reference so the device can be safely
 * accessed; fails with -EBUSY if user access is not currently allowed.
 */
static int ufs_debugfs_get_user_access(struct ufs_hba *hba)
__acquires(&hba->host_sem)
{
	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}
	ufshcd_rpm_get_sync(hba);
	return 0;
}

static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
__releases(&hba->host_sem)
{
	ufshcd_rpm_put_sync(hba);
	up(&hba->host_sem);
}
static int ee_usr_mask_set(void *data, u64 val)
{
	struct ufs_hba *hba = data;
	int err;

	if (val & ~(u64)MASK_EE_STATUS)
		return -EINVAL;
	err = ufs_debugfs_get_user_access(hba);
	if (err)
		return err;
	err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS);
	ufs_debugfs_put_user_access(hba);
	return err;
}

DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
{
	bool chgd = false;
	u16 ee_ctrl_mask;
	int err = 0;

	if (!hba->debugfs_ee_rate_limit_ms || !status)
		return;

	/*
	 * Temporarily mask the user-enabled events that were just reported;
	 * debugfs_ee_work re-enables them after the rate-limit period.
	 */
	mutex_lock(&hba->ee_ctrl_mutex);
	ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
	chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
	if (chgd) {
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
		if (err)
			dev_err(hba->dev, "%s: failed to write ee control %d\n",
				__func__, err);
	}
	mutex_unlock(&hba->ee_ctrl_mutex);

	if (chgd && !err) {
		unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);

		queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
	}
}
static void ufs_debugfs_restart_ee(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);

	if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
	    ufs_debugfs_get_user_access(hba))
		return;
	ufshcd_write_ee_control(hba);
	ufs_debugfs_put_user_access(hba);
}
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
	/* Set default exception event rate limit period to 20ms */
	hba->debugfs_ee_rate_limit_ms = 20;
	INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
	hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
	debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
	debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
			    hba, &ee_usr_mask_fops);
	debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
			   &hba->debugfs_ee_rate_limit_ms);
}

void ufs_debugfs_hba_exit(struct ufs_hba *hba)
{
	debugfs_remove_recursive(hba->debugfs_root);
	cancel_delayed_work_sync(&hba->debugfs_ee_work);
}