drm/amd/pm: correct the workload setting

Correct the workload setting so that the driver's setting does not get mixed
up with the end user's. Update the workload mask accordingly.

v2: changes as below:
1. the end user cannot erase a workload set by the driver, except for the default workload.
2. always show the real highest-priority workload to the end user.
3. the real workload mask is the combination of the driver workload mask and the end-user workload mask (see the sketch below).

v3: apply this to the other ASICs as well.
v4: simplify the code
v5: refine the code based on the review comments.
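
For illustration only, the standalone C sketch below (not part of this patch;
all identifiers in it are made up) models the intended policy from items 1-3:
the driver and the end user each keep their own workload mask, the firmware is
handed the OR of the two, and the profile reported back to the user is the
highest-priority bit of the combined mask.

/*
 * Userspace sketch of the mask bookkeeping; names are illustrative only,
 * not the driver's actual symbols.
 */
#include <stdio.h>
#include <stdint.h>

enum profile { DEFAULT_PROF, FULLSCREEN3D, POWERSAVING, VIDEO, VR, COMPUTE, CUSTOM, PROF_MAX };

struct masks {
        uint32_t driver_mask;   /* workloads requested by the driver */
        uint32_t user_mask;     /* workload selected by the end user */
};

/* the mask actually handed to the firmware */
static uint32_t combined(const struct masks *m)
{
        return m->driver_mask | m->user_mask;
}

/* highest-priority active workload, mirroring the driver's fls()-based pick */
static enum profile reported(const struct masks *m)
{
        uint32_t mask = combined(m);
        int index = mask ? 32 - __builtin_clz(mask) : 0;       /* fls(mask) */

        return (index > 0 && index <= PROF_MAX) ? index - 1 : DEFAULT_PROF;
}

int main(void)
{
        struct masks m = { .driver_mask = 1u << FULLSCREEN3D, .user_mask = 0 };

        m.user_mask = 1u << VIDEO;              /* user picks VIDEO via sysfs */
        printf("reported profile: %d\n", reported(&m)); /* 3 == VIDEO */

        m.driver_mask |= 1u << COMPUTE;         /* driver piles on COMPUTE */
        printf("reported profile: %d\n", reported(&m)); /* 5 == COMPUTE */

        m.driver_mask &= ~(1u << COMPUTE);      /* driver drops it; user choice survives */
        printf("reported profile: %d\n", reported(&m)); /* back to 3 == VIDEO */
        return 0;
}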

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Kenneth Feng, 2024-10-30 13:22:44 +08:00, committed by Alex Deucher
parent e5ad71779d
commit 8cc438be5d
12 changed files with 84 additions and 36 deletions


@@ -1261,26 +1261,33 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 	smu->watermarks_bitmap = 0;
 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+	smu->user_dpm_profile.user_workload_mask = 0;
 
 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
 
-	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+	smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
 
 	if (smu->is_apu ||
-	    !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
-	else
-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+	    !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
+		smu->driver_workload_mask =
+			1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+	} else {
+		smu->driver_workload_mask =
+			1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+		smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+	}
 
+	smu->workload_mask = smu->driver_workload_mask |
+			     smu->user_dpm_profile.user_workload_mask;
 	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
 	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
@@ -2355,17 +2362,20 @@ static int smu_switch_power_profile(void *handle,
 		return -EINVAL;
 
 	if (!en) {
-		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+		smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];
 	} else {
-		smu->workload_mask |= (1 << smu->workload_prority[type]);
+		smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
 		index = fls(smu->workload_mask);
 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];
 	}
 
+	smu->workload_mask = smu->driver_workload_mask |
+			     smu->user_dpm_profile.user_workload_mask;
+
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
 	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_bump_power_profile_mode(smu, workload, 0);
@@ -3056,12 +3066,23 @@ static int smu_set_power_profile_mode(void *handle,
 				       uint32_t param_size)
 {
 	struct smu_context *smu = handle;
+	int ret;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
 	    !smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
-	return smu_bump_power_profile_mode(smu, param, param_size);
+	if (smu->user_dpm_profile.user_workload_mask &
+	   (1 << smu->workload_priority[param[param_size]]))
+		return 0;
+
+	smu->user_dpm_profile.user_workload_mask =
+		(1 << smu->workload_priority[param[param_size]]);
+	smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
+			     smu->driver_workload_mask;
+	ret = smu_bump_power_profile_mode(smu, param, param_size);
+
+	return ret;
 }
 
 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)


@@ -240,6 +240,7 @@ struct smu_user_dpm_profile {
 	/* user clock state information */
 	uint32_t clk_mask[SMU_CLK_COUNT];
 	uint32_t clk_dependency;
+	uint32_t user_workload_mask;
 };
 
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
@@ -557,7 +558,8 @@ struct smu_context {
 	bool disable_uclk_switch;
 
 	uint32_t workload_mask;
-	uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+	uint32_t driver_workload_mask;
+	uint32_t workload_priority[WORKLOAD_POLICY_MAX];
 	uint32_t workload_setting[WORKLOAD_POLICY_MAX];
 	uint32_t power_profile_mode;
 	uint32_t default_power_profile_mode;


@@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 		return -EINVAL;
 	}
 
-
 	if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
 	     (smu->smc_fw_version >= 0x360d00)) {
 		if (size != 10)
@@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 
 	ret = smu_cmn_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetWorkloadMask,
-					  1 << workload_type,
+					  smu->workload_mask,
 					  NULL);
 	if (ret) {
 		dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
 		return ret;
 	}
 
-	smu->power_profile_mode = profile_mode;
+	smu_cmn_assign_power_profile(smu);
 
 	return 0;
 }


@@ -2083,10 +2083,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 					       smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
+
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type, NULL);
+				    smu->workload_mask, NULL);
 	if (ret)
 		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+	else
+		smu_cmn_assign_power_profile(smu);
 
 	return ret;
 }


@@ -1788,10 +1788,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
 					       smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
+
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type, NULL);
+				    smu->workload_mask, NULL);
 	if (ret)
 		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+	else
+		smu_cmn_assign_power_profile(smu);
 
 	return ret;
 }


@@ -1081,7 +1081,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
 	}
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-				    1 << workload_type,
+				    smu->workload_mask,
 				    NULL);
 	if (ret) {
 		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
@@ -1089,7 +1089,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
 		return ret;
 	}
 
-	smu->power_profile_mode = profile_mode;
+	smu_cmn_assign_power_profile(smu);
 
 	return 0;
 }


@@ -892,14 +892,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
 	}
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-				    1 << workload_type,
+				    smu->workload_mask,
 				    NULL);
 	if (ret) {
 		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
 		return ret;
 	}
 
-	smu->power_profile_mode = profile_mode;
+	smu_cmn_assign_power_profile(smu);
 
 	return 0;
 }


@@ -2473,7 +2473,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
 	DpmActivityMonitorCoeffInt_t *activity_monitor =
 		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
 	int workload_type, ret = 0;
-	u32 workload_mask, selected_workload_mask;
+	u32 workload_mask;
 
 	smu->power_profile_mode = input[size];
 
@@ -2540,7 +2540,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
 	if (workload_type < 0)
 		return -EINVAL;
 
-	selected_workload_mask = workload_mask = 1 << workload_type;
+	workload_mask = 1 << workload_type;
 
 	/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
 	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2555,12 +2555,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
 			workload_mask |= 1 << workload_type;
 	}
 
+	smu->workload_mask |= workload_mask;
 	ret = smu_cmn_send_smc_msg_with_param(smu,
 					       SMU_MSG_SetWorkloadMask,
-					       workload_mask,
+					       smu->workload_mask,
 					       NULL);
-	if (!ret)
-		smu->workload_mask = selected_workload_mask;
+	if (!ret) {
+		smu_cmn_assign_power_profile(smu);
+		if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
+			workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       PP_SMC_POWER_PROFILE_FULLSCREEN3D);
+			smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
+						  ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
+						  : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+		}
+	}
 
 	return ret;
 }


@@ -2487,13 +2487,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
 					       smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
+
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type, NULL);
+				    smu->workload_mask, NULL);
 
 	if (ret)
 		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
 	else
-		smu->workload_mask = (1 << workload_type);
+		smu_cmn_assign_power_profile(smu);
 
 	return ret;
 }


@@ -1795,12 +1795,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
 	if (workload_type < 0)
 		return -EINVAL;
 
-	ret = smu_cmn_send_smc_msg_with_param(smu,
-					       SMU_MSG_SetWorkloadMask,
-					       1 << workload_type,
-					       NULL);
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+					       smu->workload_mask, NULL);
+
 	if (!ret)
-		smu->workload_mask = 1 << workload_type;
+		smu_cmn_assign_power_profile(smu);
 
 	return ret;
 }


@@ -1141,6 +1141,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 	return ret;
 }
 
+void smu_cmn_assign_power_profile(struct smu_context *smu)
+{
+	uint32_t index;
+	index = fls(smu->workload_mask);
+	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+	smu->power_profile_mode = smu->workload_setting[index];
+}
+
 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
 {
 	struct pci_dev *p = NULL;


@@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
 			  enum pp_mp1_state mp1_state);
 
+void smu_cmn_assign_power_profile(struct smu_context *smu);
+
 /*
  * Helper function to make sysfs_emit_at() happy. Align buf to
  * the current page boundary and record the offset.