drm/amd: Evict resources during PM ops prepare() callback

The Linux PM core has a prepare() callback that runs before suspend().

If the system is under high memory pressure, the resources may need
to be evicted into swap instead of system memory.  If the storage
backing swap is offlined during the suspend() step, such an eviction
can fail.

So move this step into prepare() to evict the majority of resources
there, and update all non-pmops callers to call the same callback.

Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2362
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Mario Limonciello, 2023-10-06 13:50:20 -05:00 (committed by Alex Deucher)
commit 5095d54181, parent 31715a8620
3 changed files with 34 additions and 8 deletions
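For context on why prepare() is the right hook: the PM core runs every device's prepare() callback before it starts invoking suspend() callbacks, so work done there still has normal access to swap. Below is a minimal sketch of how the hook is wired up through struct dev_pm_ops; the member names come from include/linux/pm.h, the callback names match the amdgpu ones touched by this diff, and the reduced struct layout is illustrative rather than the full amdgpu_pm_ops definition. (A positive return from .prepare tells the PM core it may leave a runtime-suspended device where it is, which is what the DPM_FLAG_SMART_SUSPEND hunk below relies on.)

#include <linux/pm.h>

/* Callbacks defined in amdgpu_drv.c; shown here only as forward declarations. */
static int  amdgpu_pmops_prepare(struct device *dev);
static void amdgpu_pmops_complete(struct device *dev);
static int  amdgpu_pmops_suspend(struct device *dev);
static int  amdgpu_pmops_resume(struct device *dev);

/*
 * For system sleep the PM core invokes .prepare for all devices first and
 * only then walks the .suspend callbacks, so buffer eviction done from
 * .prepare happens while the storage backing swap is still online.
 */
static const struct dev_pm_ops amdgpu_pm_ops = {
	.prepare	= amdgpu_pmops_prepare,
	.complete	= amdgpu_pmops_complete,
	.suspend	= amdgpu_pmops_suspend,
	.resume		= amdgpu_pmops_resume,
	/* ... the real table also wires freeze/thaw/poweroff/restore and runtime PM ... */
};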

@@ -1408,6 +1408,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_release_kms(struct drm_device *dev);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);

@@ -1760,6 +1760,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
@@ -4335,6 +4336,31 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_prepare - prepare for device suspend
 *
 * @dev: drm dev pointer
 *
 * Prepare to put the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_prepare(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* Evict the majority of BOs before starting suspend sequence */
	r = amdgpu_device_evict_resources(adev);
	if (r)
		return r;

	return 0;
}

/**
 * amdgpu_device_suspend - initiate device suspend
 *
@@ -4355,11 +4381,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
	adev->in_suspend = true;

	/* Evict the majority of BOs before grabbing the full access */
	r = amdgpu_device_evict_resources(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		r = amdgpu_virt_request_full_gpu(adev, false);

@@ -2427,8 +2427,9 @@ static int amdgpu_pmops_prepare(struct device *dev)
	/* Return a positive number here so
	 * DPM_FLAG_SMART_SUSPEND works properly
	 */
	if (amdgpu_device_supports_boco(drm_dev))
		return pm_runtime_suspended(dev);
	if (amdgpu_device_supports_boco(drm_dev) &&
	    pm_runtime_suspended(dev))
		return 1;

	/* if we will not support s3 or s2i for the device
	 * then skip suspend
@@ -2437,7 +2438,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
	    !amdgpu_acpi_is_s3_active(adev))
		return 1;

	return 0;
	return amdgpu_device_prepare(drm_dev);
}

static void amdgpu_pmops_complete(struct device *dev)
@@ -2637,6 +2638,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
	if (amdgpu_device_supports_boco(drm_dev))
		adev->mp1_state = PP_MP1_STATE_UNLOAD;

	ret = amdgpu_device_prepare(drm_dev);
	if (ret)
		return ret;
	ret = amdgpu_device_suspend(drm_dev, false);
	if (ret) {
		adev->in_runpm = false;