drm/amdgpu: move smu_debug_mask to a more proper place
Since smu_context will become invisible outside of the power code, move smu_debug_mask out of it. This also lets smu_debug_mask be shared by all power code rather than by one specific framework (swSMU) only.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 7e31a8585b
parent fa4a427d84
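With this change the debug mask lives in struct amdgpu_pm and remains exposed through the amdgpu_smu_debug debugfs file (see the first hunk below), so any power-code path can honor it without reaching into the swSMU-private struct smu_context. A minimal sketch of that pattern follows; example_halt_on_error() is a hypothetical helper used for illustration only and is not part of this commit, it simply mirrors the checks the swSMU common code performs.

/*
 * Hypothetical consumer of the relocated mask (illustration only, not part
 * of this commit). Any power-code path can now gate debug behaviour on
 * adev->pm.smu_debug_mask instead of touching struct smu_context.
 */
static void example_halt_on_error(struct amdgpu_device *adev, int res)
{
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}
}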
@@ -1619,7 +1619,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		return 0;
 
 	debugfs_create_x32("amdgpu_smu_debug", 0600, root,
-			   &adev->smu.smu_debug_mask);
+			   &adev->pm.smu_debug_mask);
 
 	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
 				  &fops_ib_preempt);
@@ -423,6 +423,9 @@ enum ip_power_state {
 	POWER_STATE_OFF,
 };
 
+/* Used to mask smu debug modes */
+#define SMU_DEBUG_HALT_ON_ERROR		0x1
+
 struct amdgpu_pm {
 	struct mutex		mutex;
 	u32			current_sclk;
@@ -460,6 +463,11 @@ struct amdgpu_pm {
 	struct list_head	pm_attr_list;
 
 	atomic_t		pwr_state[AMD_IP_BLOCK_TYPE_NUM];
+
+	/*
+	 * 0 = disabled (default), otherwise enable corresponding debug mode
+	 */
+	uint32_t		smu_debug_mask;
 };
 
 #define R600_SSTU_DFLT		0
@@ -482,9 +482,6 @@ struct stb_context {
 
 #define WORKLOAD_POLICY_MAX 7
 
-/* Used to mask smu debug modes */
-#define SMU_DEBUG_HALT_ON_ERROR		0x1
-
 struct smu_context
 {
 	struct amdgpu_device            *adev;
@@ -573,11 +570,6 @@ struct smu_context
 	struct smu_user_dpm_profile user_dpm_profile;
 
 	struct stb_context stb_context;
-
-	/*
-	 * 0 = disabled (default), otherwise enable corresponding debug mode
-	 */
-	uint32_t smu_debug_mask;
 };
 
 struct i2c_adapter;
@@ -257,10 +257,11 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
 				     uint16_t msg_index,
 				     uint32_t param)
 {
+	struct amdgpu_device *adev = smu->adev;
 	u32 reg;
 	int res;
 
-	if (smu->adev->no_hw_access)
+	if (adev->no_hw_access)
 		return 0;
 
 	reg = __smu_cmn_poll_stat(smu);
@@ -272,9 +273,9 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
 	__smu_cmn_send_msg(smu, msg_index, param);
 	res = 0;
 Out:
-	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
 	    res && (res != -ETIME)) {
-		amdgpu_device_halt(smu->adev);
+		amdgpu_device_halt(adev);
 		WARN_ON(1);
 	}
 
@@ -299,7 +300,7 @@ int smu_cmn_wait_for_response(struct smu_context *smu)
 	reg = __smu_cmn_poll_stat(smu);
 	res = __smu_cmn_reg2errno(smu, reg);
 
-	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
 	    res && (res != -ETIME)) {
 		amdgpu_device_halt(smu->adev);
 		WARN_ON(1);
@@ -343,10 +344,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
 				    uint32_t param,
 				    uint32_t *read_arg)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int res, index;
 	u32 reg;
 
-	if (smu->adev->no_hw_access)
+	if (adev->no_hw_access)
 		return 0;
 
 	index = smu_cmn_to_asic_specific_index(smu,
@@ -372,8 +374,8 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
 	if (read_arg)
 		smu_cmn_read_arg(smu, read_arg);
 Out:
-	if (unlikely(smu->smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
-		amdgpu_device_halt(smu->adev);
+	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
+		amdgpu_device_halt(adev);
 		WARN_ON(1);
 	}
 