drm/amd/pm: drop redundant and unneeded BACO APIs V2
Use other APIs which provide the same functionality but are much cleaner.

V2: drop the unneeded intermediate interface.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
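The hunks below switch the callers from the SMU-specific helpers (smu_baco_is_support(), smu_baco_enter(), smu_baco_exit()) to the generic wrappers amdgpu_dpm_is_baco_supported() and amdgpu_dpm_baco_reset(), both of which take the amdgpu_device pointer directly. As a rough illustration of why the per-call-site enter/exit sequence can be dropped, a reset wrapper of the following shape performs the whole BACO cycle behind one call. This is only a sketch, not the in-tree amdgpu_dpm_baco_reset() implementation: the example_dpm_baco_reset name is made up, and routing through the amd_pm_funcs set_asic_baco_state callback with 1 = enter / 0 = exit is an assumption about the powerplay interface.

/*
 * Illustrative sketch only (not the actual in-tree code): a generic
 * BACO reset wrapper that hides the enter/exit sequence from the ASIC
 * code, so nv_asic_reset() can issue a single call instead of pairing
 * smu_baco_enter() with smu_baco_exit().
 */
static int example_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	/* assumed callback: drive BACO through the powerplay function table */
	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		return ret;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	if (ret)
		return ret;

	return 0;
}

With a wrapper of this shape in place, the nv.c changes below reduce the BACO branch of nv_asic_reset() to a single ret = amdgpu_dpm_baco_reset(adev); and drop the now unused local smu_context pointer.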
@@ -515,21 +515,9 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
-static bool nv_asic_supports_baco(struct amdgpu_device *adev)
-{
-	struct smu_context *smu = &adev->smu;
-
-	if (smu_baco_is_support(smu))
-		return true;
-	else
-		return false;
-}
-
 static enum amd_reset_method
 nv_asic_reset_method(struct amdgpu_device *adev)
 {
-	struct smu_context *smu = &adev->smu;
-
 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
@@ -548,7 +536,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 	case CHIP_DIMGREY_CAVEFISH:
 		return AMD_RESET_METHOD_MODE1;
 	default:
-		if (smu_baco_is_support(smu))
+		if (amdgpu_dpm_is_baco_supported(adev))
 			return AMD_RESET_METHOD_BACO;
 		else
 			return AMD_RESET_METHOD_MODE1;
@@ -558,7 +546,6 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 static int nv_asic_reset(struct amdgpu_device *adev)
 {
 	int ret = 0;
-	struct smu_context *smu = &adev->smu;
 
 	switch (nv_asic_reset_method(adev)) {
 	case AMD_RESET_METHOD_PCI:
@@ -567,13 +554,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 		break;
 	case AMD_RESET_METHOD_BACO:
 		dev_info(adev->dev, "BACO reset\n");
-
-		ret = smu_baco_enter(smu);
-		if (ret)
-			return ret;
-		ret = smu_baco_exit(smu);
-		if (ret)
-			return ret;
+		ret = amdgpu_dpm_baco_reset(adev);
 		break;
 	case AMD_RESET_METHOD_MODE2:
 		dev_info(adev->dev, "MODE2 reset\n");
@@ -981,7 +962,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
 	.need_full_reset = &nv_need_full_reset,
 	.need_reset_on_init = &nv_need_reset_on_init,
 	.get_pcie_replay_count = &nv_get_pcie_replay_count,
-	.supports_baco = &nv_asic_supports_baco,
+	.supports_baco = &amdgpu_dpm_is_baco_supported,
 	.pre_asic_init = &nv_pre_asic_init,
 	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
 	.query_video_codecs = &nv_query_video_codecs,
@@ -1250,15 +1250,6 @@ int smu_get_power_limit(struct smu_context *smu,
 			uint32_t *limit,
 			enum smu_ppt_limit_level limit_level);
 
-int smu_set_azalia_d3_pme(struct smu_context *smu);
-
-bool smu_baco_is_support(struct smu_context *smu);
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-
-int smu_baco_enter(struct smu_context *smu);
-int smu_baco_exit(struct smu_context *smu);
-
 bool smu_mode1_reset_is_support(struct smu_context *smu);
 bool smu_mode2_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
@@ -2668,48 +2668,6 @@ static int smu_set_xgmi_pstate(void *handle,
 	return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->set_azalia_d3_pme)
-		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
-	bool ret = false;
-
-	if (!smu->pm_enabled)
-		return false;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
-		ret = smu->ppt_funcs->baco_is_support(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
 static int smu_get_baco_capability(void *handle, bool *cap)
 {
 	struct smu_context *smu = handle;
@@ -2730,59 +2688,6 @@ static int smu_get_baco_capability(void *handle, bool *cap)
 	return ret;
 }
 
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
-	if (!smu->ppt_funcs->baco_get_state)
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-	*state = smu->ppt_funcs->baco_get_state(smu);
-	mutex_unlock(&smu->mutex);
-
-	return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->baco_enter)
-		ret = smu->ppt_funcs->baco_enter(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	if (ret)
-		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
-
-	return ret;
-}
-
-int smu_baco_exit(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->baco_exit)
-		ret = smu->ppt_funcs->baco_exit(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	if (ret)
-		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
-	return ret;
-}
-
 static int smu_baco_set_state(void *handle, int state)
 {
 	struct smu_context *smu = handle;