scsi: core: pm: Rely on the device driver core for async power management
Instead of implementing asynchronous resume support in the SCSI core, rely on the device driver core for resuming SCSI devices asynchronously. Instead of only supporting asynchronous resumes, also support asynchronous suspends.

Link: https://lore.kernel.org/r/20211006215453.3318929-2-bvanassche@acm.org
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Martin Kepplinger <martin.kepplinger@puri.sm>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent a4bcbf7191
commit a19a93e4c6
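For context on the mechanism this patch switches to (not part of the commit itself): the driver core already knows how to suspend and resume flagged devices asynchronously. Roughly paraphrased from drivers/base/power/main.c around the v5.15 time frame (exact details vary by kernel version), a device opts in with device_enable_async_suspend(), async PM is additionally gated by the global /sys/power/pm_async knob, and each dpm_suspend()/dpm_resume() phase then schedules the device's callbacks as async work while parent/child ordering is still enforced through dev->power.completion:

/* Paraphrased sketch of existing driver-core code, not code added by this patch. */
void device_enable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = true;
}

static bool is_async(struct device *dev)
{
	/* Honour the per-device flag and the global /sys/power/pm_async toggle. */
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		/* Run this device's PM callback from an async worker. */
		get_device(dev);
		async_schedule_dev(func, dev);
		return true;
	}
	return false;
}

Because the PM core waits for all of this async work before leaving each suspend/resume phase, the SCSI-private scsi_sd_pm_domain and its explicit async_synchronize_full_domain() flushes can simply be dropped in the hunks below.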
drivers/scsi/hosts.c
@@ -475,6 +475,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
 	shost->shost_gendev.bus = &scsi_bus_type;
 	shost->shost_gendev.type = &scsi_host_type;
+	scsi_enable_async_suspend(&shost->shost_gendev);
 
 	device_initialize(&shost->shost_dev);
 	shost->shost_dev.parent = &shost->shost_gendev;
drivers/scsi/scsi.c
@@ -86,14 +86,6 @@ unsigned int scsi_logging_level;
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-/*
- * Domain for asynchronous system resume operations. It is marked 'exclusive'
- * to avoid being included in the async_synchronize_full() that is invoked by
- * dpm_resume().
- */
-ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
-EXPORT_SYMBOL(scsi_sd_pm_domain);
-
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd)
 {
drivers/scsi/scsi_pm.c
@@ -56,9 +56,6 @@ static int scsi_dev_type_suspend(struct device *dev,
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err;
 
-	/* flush pending in-flight resume operations, suspend is synchronous */
-	async_synchronize_full_domain(&scsi_sd_pm_domain);
-
 	err = scsi_device_quiesce(to_scsi_device(dev));
 	if (err == 0) {
 		err = cb(dev, pm);
@@ -123,48 +120,11 @@ scsi_bus_suspend_common(struct device *dev,
 	return err;
 }
 
-static void async_sdev_resume(void *dev, async_cookie_t cookie)
-{
-	scsi_dev_type_resume(dev, do_scsi_resume);
-}
-
-static void async_sdev_thaw(void *dev, async_cookie_t cookie)
-{
-	scsi_dev_type_resume(dev, do_scsi_thaw);
-}
-
-static void async_sdev_restore(void *dev, async_cookie_t cookie)
-{
-	scsi_dev_type_resume(dev, do_scsi_restore);
-}
-
 static int scsi_bus_resume_common(struct device *dev,
 		int (*cb)(struct device *, const struct dev_pm_ops *))
 {
-	async_func_t fn;
-
-	if (!scsi_is_sdev_device(dev))
-		fn = NULL;
-	else if (cb == do_scsi_resume)
-		fn = async_sdev_resume;
-	else if (cb == do_scsi_thaw)
-		fn = async_sdev_thaw;
-	else if (cb == do_scsi_restore)
-		fn = async_sdev_restore;
-	else
-		fn = NULL;
-
-	if (fn) {
-		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
-
-		/*
-		 * If a user has disabled async probing a likely reason
-		 * is due to a storage enclosure that does not inject
-		 * staggered spin-ups. For safety, make resume
-		 * synchronous as well in that case.
-		 */
-		if (strncmp(scsi_scan_type, "async", 5) != 0)
-			async_synchronize_full_domain(&scsi_sd_pm_domain);
+	if (scsi_is_sdev_device(dev)) {
+		scsi_dev_type_resume(dev, cb);
 	} else {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
drivers/scsi/scsi_priv.h
@@ -116,7 +116,7 @@ extern void scsi_exit_procfs(void);
 #endif /* CONFIG_PROC_FS */
 
 /* scsi_scan.c */
 extern char scsi_scan_type[];
+void scsi_enable_async_suspend(struct device *dev);
 extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
 				   unsigned int, u64, enum scsi_scan_mode);
@@ -170,8 +170,6 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
 #endif /* CONFIG_PM */
 
-extern struct async_domain scsi_sd_pm_domain;
-
 /* scsi_dh.c */
 #ifdef CONFIG_SCSI_DH
 void scsi_dh_add_device(struct scsi_device *sdev);
drivers/scsi/scsi_scan.c
@@ -122,6 +122,22 @@ struct async_scan_data {
 	struct completion prev_finished;
 };
 
+/**
+ * scsi_enable_async_suspend - Enable async suspend and resume
+ */
+void scsi_enable_async_suspend(struct device *dev)
+{
+	/*
+	 * If a user has disabled async probing a likely reason is due to a
+	 * storage enclosure that does not inject staggered spin-ups. For
+	 * safety, make resume synchronous as well in that case.
+	 */
+	if (strncmp(scsi_scan_type, "async", 5) != 0)
+		return;
+	/* Enable asynchronous suspend and resume. */
+	device_enable_async_suspend(dev);
+}
+
 /**
  * scsi_complete_async_scans - Wait for asynchronous scans to complete
  *
@@ -454,6 +470,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
 	dev->bus = &scsi_bus_type;
 	dev->type = &scsi_target_type;
+	scsi_enable_async_suspend(dev);
 	starget->id = id;
 	starget->channel = channel;
 	starget->can_queue = 0;
drivers/scsi/scsi_sysfs.c
@@ -1616,6 +1616,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
 	device_initialize(&sdev->sdev_gendev);
 	sdev->sdev_gendev.bus = &scsi_bus_type;
 	sdev->sdev_gendev.type = &scsi_dev_type;
+	scsi_enable_async_suspend(&sdev->sdev_gendev);
 	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
 		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
 
drivers/scsi/sd.c
@@ -3504,7 +3504,6 @@ static int sd_remove(struct device *dev)
 	sdkp = dev_get_drvdata(dev);
 	scsi_autopm_get_device(sdkp->device);
 
-	async_synchronize_full_domain(&scsi_sd_pm_domain);
 	device_del(&sdkp->dev);
 	del_gendisk(sdkp->disk);
 	sd_shutdown(dev);