Merge tag 'amd-drm-fixes-6.9-2024-04-10' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.9-2024-04-10:

amdgpu:
- GPU reset fixes
- Fix some confusing logging
- UMSCH fix
- Aborted suspend fix
- DCN 3.5 fixes
- S4 fix
- MES logging fixes
- SMU 14 fixes
- SDMA 4.4.2 fix
- KASAN fix
- SMU 13.0.10 fix
- VCN partition fix
- GFX11 fixes
- DWB fixes
- Plane handling fix
- FAMS fix
- DCN 3.1.6 fix
- VSC SDP fixes
- OLED panel fix
- GFX 11.5 fix

amdkfd:
- GPU reset fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240411013425.6431-1-alexander.deucher@amd.com
commit b4589db566
Dave Airlie, 2024-04-11 14:47:29 +10:00
32 changed files with 652 additions and 127 deletions


@@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;


@@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev)) {
r = psp_gpu_reset(adev);
} else {
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
}
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
}
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
}
}
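The hunk above interleaves removed and added lines, so the result reads more easily condensed. A sketch of the flow after this change, using only names from the hunk: headless SMU 13.0.10 parts reset through the PSP, every other ASIC keeps the forced default reset method, and a single error check now covers both branches.

    if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
        !amdgpu_device_has_display_hardware(adev)) {
            /* headless SMU 13.0.10: reset through the PSP */
            r = psp_gpu_reset(adev);
    } else {
            /* force the default reset method, regardless of the
             * reset_method module parameter */
            tmp = amdgpu_reset_method;
            amdgpu_reset_method = AMD_RESET_METHOD_NONE;
            r = amdgpu_asic_reset(adev);
            amdgpu_reset_method = tmp;
    }
    if (r) {
            /* one error check now covers both reset paths */
            dev_err(adev->dev, "asic reset on init failed\n");
            goto failed;
    }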


@@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:


@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
/**
* DOC: mes_log_enable (int)
* Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
* (0 = disabled (default), 1 = enabled)
*/
MODULE_PARM_DESC(mes_log_enable,
"Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
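For reference, a parameter declared this way is set either on the kernel command line (amdgpu.mes_log_enable=1 when the driver is built in) or at load time (modprobe amdgpu mes_log_enable=1); the 0444 permission makes the current value readable, but not writable, under /sys/module/amdgpu/parameters/.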


@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
dev_err(adev->dev,
"Error scheduling IBs (%d) in ring(%s)", r,
ring->name);
}
job->job_run_counter++;


@@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
if (!amdgpu_mes_log_enable)
return 0;
r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -1549,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
mem, PAGE_SIZE, false);
mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@@ -1565,7 +1567,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
if (adev->enable_mes)
if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);


@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;


@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
/* VCN is shared by two partitions under CPX MODE */
/* VCN may be shared by two partitions under CPX MODE in certain
* configs.
*/
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}
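A worked example of the new sharing test, with assumed partition counts: on a config with eight compute partitions and four VCN instances, num_xcps > num_vcn_inst holds, so each VCN instance serves two partitions and the instance masks are spaced two bits apart. The old test keyed only on AMDGPU_CPX_PARTITION_MODE, which mislabeled CPX configs that have one VCN instance per partition.

    /* illustration only; the counts below are assumed, not from the patch */
    adev->xcp_mgr->num_xcps = 8;
    adev->vcn.num_vcn_inst = 4;         /* 8 > 4 => VCN shared across partitions */

    inst_mask = 1 << (inst_idx * 2);    /* inst 0 -> 0x1, 1 -> 0x4, 2 -> 0x10, 3 -> 0x40 */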


@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap |= global_active_rb_bitmap;
active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}
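The one-character fix in gfx_v11_0_setup_rb() turns a bitwise OR into an AND: the RB bitmap accumulated from the shader arrays must be intersected with the globally active RB mask, not merged into it, or harvested RBs get counted as enabled. With assumed masks:

    u32 active_rb_bitmap = 0xF;         /* accumulated from the SAs (assumed value) */
    u32 global_active_rb_bitmap = 0x5;  /* global harvesting mask (assumed value) */

    /* old: 0xF | 0x5 == 0xF -> harvested RBs reported as enabled */
    /* new: 0xF & 0x5 == 0x5 -> only RBs active in both views remain */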


@@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
mes->event_log_gpu_addr;
}
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),


@@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
DRAM_ECC_INT_ENABLE, 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
break;
/* sdma ecc interrupt is enabled by default
* driver doesn't need to do anything to
* enable the interrupt */
case AMDGPU_IRQ_STATE_ENABLE:
default:
break;
}
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}


@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
return false;
default:
return true;
}
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x1;
if (adev->rev_id == 0)
adev->external_rev_id = 0x1;
else
adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg1, sol_reg2;
/* Will reset for the following suspend abort cases.
* 1) Only reset dGPU side.
* 2) S3 suspend got aborted and TOS is active.
*/
if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
!adev->suspend_complete) {
sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
msleep(100);
sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
return (sol_reg1 != sol_reg2);
}
return false;
}
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (soc21_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend aborted, resetting...");
soc21_asic_reset(adev);
}
return soc21_common_hw_init(adev);
}
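The sol in sol_reg is the PSP's sign-of-life register (MP0_SMN_C2PMSG_81), which per the comment above keeps changing while the trusted OS is executing. Reading it twice, 100 ms apart, therefore distinguishes an aborted S3 suspend that left TOS running (the values differ, so a reset is needed before hw init) from a clean resume (the values match). The detection idiom, condensed:

    /* sketch of the idiom above; reset_needed is an illustrative name */
    u32 a = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
    msleep(100);
    u32 b = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
    reset_needed = (a != b);    /* a moving register means TOS is still alive */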


@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
ring->wptr = 0;
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);


@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
int count;
if (!kfd->init_complete)
return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = ++kfd_locked;
mutex_unlock(&kfd_processes_mutex);
/* For first KFD device suspend all the KFD processes */
if (count == 1)
if (++kfd_locked == 1)
kfd_suspend_all_processes();
mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count, i;
int ret, i;
if (!kfd->init_complete)
return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = --kfd_locked;
mutex_unlock(&kfd_processes_mutex);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
mutex_unlock(&kfd_processes_mutex);
}
return ret;
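The interleaved hunks above condense to one behavioral change: kfd_locked is now updated and tested while kfd_processes_mutex is held, rather than cached into a local count after unlocking, which could race with other devices suspending or resuming concurrently. The fixed pattern, taken from the hunks:

    /* suspend: first device to lock KFD suspends all processes */
    mutex_lock(&kfd_processes_mutex);
    if (++kfd_locked == 1)
            kfd_suspend_all_processes();
    mutex_unlock(&kfd_processes_mutex);

    /* resume: last device to unlock KFD resumes all processes */
    mutex_lock(&kfd_processes_mutex);
    if (--kfd_locked == 0)
            ret = kfd_resume_all_processes();
    WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
    mutex_unlock(&kfd_processes_mutex);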


@@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
kfd_hws_hang(dqm);
return -ETIME;
}


@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -3044,6 +3047,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -4820,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -5921,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return NULL;
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6306,19 +6318,16 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
stream->use_vsc_sdp_for_colorimetry = false;
if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
stream->use_vsc_sdp_for_colorimetry =
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
} else {
if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
}
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@@ -8762,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);


@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
/* Maximum resolution supported by DWB */
return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,


@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
!pipe->stream->link_enc)) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn316_disable_otg_wa(clk_mgr_base, context, true);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, false);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}


@@ -73,6 +73,12 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
#define regCLK5_0_CLK5_spll_field_8 0x464b
#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#define REG(reg_name) \
@@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_context *ctx = clk_mgr->base.ctx;
uint32_t ssc_enable;
REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
return ssc_enable == 1;
}
static void init_clk_states(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
@@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
// Adjust the dp_dto reference clock if SSC is enabled; otherwise apply dprefclk.
if (dcn35_is_spll_ssc_enabled(clk_mgr))
clk_mgr->dp_dto_source_clock_in_khz =
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
static struct dcn35_ss_info_table ss_info_table = {
.ss_divider = 1000,
.ss_percentage = {0, 0, 375, 375, 375}
};
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
struct dc_context *ctx = clk_mgr->base.ctx;
uint32_t clock_source;
REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
// If it's DFS mode, clock_source is 0.
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
if (clk_mgr->dprefclk_ss_percentage != 0) {
clk_mgr->ss_on_dprefclk = true;
clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
}
}
}
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
dcn35_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
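Tying the SSC hunks above together with a quick arithmetic sketch: ss_percentage entries are in units of ss_divider, so an entry of 375 with ss_divider = 1000 encodes a 0.375% spread. The exact adjustment lives in dce_adjust_dp_ref_freq_for_ss(); the downspread model and clock value below are assumed, for illustration only.

    /* illustration only; assumed downspread model, not the real helper */
    uint32_t ss_percentage = 375, ss_divider = 1000;    /* 375/1000 = 0.375% */
    uint32_t dprefclk_khz = 100000;                     /* assumed value */
    uint32_t adjusted_khz = dprefclk_khz -
            (uint64_t)dprefclk_khz * ss_percentage / (ss_divider * 100);
    /* adjusted_khz == 99625 */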


@@ -436,6 +436,15 @@ bool dc_state_add_plane(
goto out;
}
if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
/* ODM combine could prevent us from supporting more planes;
* we will reset ODM slice count back to 1 when all planes have
* been removed to maximize the amount of planes supported when
* new planes are added.
*/
resource_update_pipes_for_stream_with_slice_count(
state, dc->current_state, dc->res_pool, stream, 1);
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)


@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
// Apply SS-ed (spread spectrum) dpref clock for eDP only.
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
&& pix_clk_params->signal_type == SIGNAL_TYPE_EDP
&& encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);


@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
}


@@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
static int smu_reset_mp1_state(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if ((!adev->in_runpm) && (!adev->in_suspend) &&
(!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev))
ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
return ret;
}
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
return smu_smc_hw_cleanup(smu);
ret = smu_smc_hw_cleanup(smu);
if (ret)
return ret;
ret = smu_reset_mp1_state(smu);
if (ret)
return ret;
return 0;
}
static void smu_late_fini(void *handle)


@@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
SMU_BACO_STATE_NONE,
};
struct smu_baco_context {


@@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
uint8_t NumDcfClkLevelsEnabled;
uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
uint8_t NumSocClkLevelsEnabled;
uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
uint8_t VpeClkLevelsEnabled;
uint8_t NumMemPstatesEnabled;
uint8_t NumFclkLevelsEnabled;
uint8_t spare;
uint32_t MinGfxClk;
uint32_t MaxGfxClk;
} DpmClocks_t_v14_0_1;
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
#define TABLE_SPARE0 5 // Unused
#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8


@@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
#define FEATURE_VCN_DPM_BIT 10
#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
#define FEATURE_SPARE0_BIT 26 //SPARE
#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
#define FEATURE_SMART_L3_RINSER_BIT 34
#define FEATURE_SPARE1_BIT 35 //SPARE
#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
#define FEATURE_DS_VCN_BIT 45
#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
#define FEATURE_SPARE2_BIT 49 //SPARE
#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
#define FEATURE_SPARE_61 61
#define FEATURE_FP_DIDT 62
#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/
#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
typedef struct {
// MP1_EXT_SCRATCH0
uint32_t DpmHandlerID : 8;
uint32_t ActivityMonitorID : 8;
uint32_t DpmTimerID : 8;
uint32_t DpmHubID : 4;
uint32_t DpmHubTask : 4;
// MP1_EXT_SCRATCH1
uint32_t CclkSyncStatus : 8;
uint32_t ZstateStatus : 4;
uint32_t Cpu1VddOff : 4;
uint32_t DstateFun : 4;
uint32_t DstateDev : 4;
uint32_t GfxOffStatus : 2;
uint32_t Cpu0Off : 2;
uint32_t Cpu1Off : 2;
uint32_t Cpu0VddOff : 2;
// MP1_EXT_SCRATCH2
uint32_t P2JobHandler :32;
// MP1_EXT_SCRATCH3
uint32_t PostCode :32;
// MP1_EXT_SCRATCH4
uint32_t MsgPortBusy :15;
uint32_t RsmuPmiP1Pending : 1;
uint32_t RsmuPmiP2PendingCnt : 8;
uint32_t DfCstateExitPending : 1;
uint32_t Pc6EntryPending : 1;
uint32_t Pc6ExitPending : 1;
uint32_t WarmResetPending : 1;
uint32_t Mp0ClkPending : 1;
uint32_t InWhisperMode : 1;
uint32_t spare2 : 2;
// MP1_EXT_SCRATCH5
uint32_t IdleMask :32;
// MP1_EXT_SCRATCH6 = RTOS threads' status
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t_v14_0_1;
#pragma pack(pop)


@@ -72,23 +72,19 @@
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
#define PPSMC_MSG_spare_0x17 0x17
#define PPSMC_MSG_spare_0x18 0x18
#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_spare_0x20 0x20
#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
@@ -99,8 +95,8 @@
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**


@@ -27,6 +27,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
#define FEATURE_MASK(feature) (1ULL << feature)


@@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_set_mp1_state(smu, mp1_state);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
break;
default:
/* Ignore others */


@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (!en && !adev->in_s0ix)
if (!en && !adev->in_s0ix) {
/* Adds a GFX reset as workaround just before sending the
* MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
* an invalid state.
*/
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
SMU_RESET_MODE_2, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
return ret;
}


@@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
break;
default:


@@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@@ -593,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
uint32_t *freq)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
switch (clk_type) {
case SMU_SOCCLK:
if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level];
break;
case SMU_VCLK:
if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->VClocks0[dpm_level];
break;
case SMU_DCLK:
if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->DClocks0[dpm_level];
break;
case SMU_VCLK1:
if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->VClocks1[dpm_level];
break;
case SMU_DCLK1:
if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->DClocks1[dpm_level];
break;
case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= clk_table->NumMemPstatesEnabled)
return -EINVAL;
*freq = clk_table->MemPstateTable[dpm_level].MemClk;
break;
case SMU_FCLK:
if (dpm_level >= clk_table->NumFclkLevelsEnabled)
return -EINVAL;
*freq = clk_table->FclkClocks_Freq[dpm_level];
break;
default:
return -EINVAL;
}
return 0;
}
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@@ -637,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
uint32_t *freq)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
return 0;
}
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@@ -657,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@@ -666,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
uint32_t clock_limit;
uint32_t max_dpm_level, min_dpm_level;
int ret = 0;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk;
break;
case SMU_FCLK:
clock_limit = smu->smu_table.boot_values.fclk;
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
case SMU_VCLK:
case SMU_VCLK1:
clock_limit = smu->smu_table.boot_values.vclk;
break;
case SMU_DCLK:
case SMU_DCLK1:
clock_limit = smu->smu_table.boot_values.dclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in MHz units */
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
return 0;
}
if (max) {
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
*max = clk_table->MaxGfxClk;
break;
case SMU_MCLK:
case SMU_UCLK:
case SMU_FCLK:
max_dpm_level = 0;
break;
case SMU_SOCCLK:
max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
break;
case SMU_VCLK:
case SMU_DCLK:
max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
break;
case SMU_VCLK1:
case SMU_DCLK1:
max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
break;
default:
ret = -EINVAL;
goto failed;
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
}
if (min) {
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
*min = clk_table->MinGfxClk;
break;
case SMU_MCLK:
case SMU_UCLK:
min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
break;
case SMU_FCLK:
min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
break;
case SMU_SOCCLK:
min_dpm_level = 0;
break;
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
min_dpm_level = 0;
break;
default:
ret = -EINVAL;
goto failed;
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
}
failed:
return ret;
}
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@@ -736,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@@ -768,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@@ -778,6 +967,19 @@ failed:
return ret;
}
static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
return 0;
}
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -811,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
switch (clk_type) {
case SMU_SOCCLK:
*count = clk_table->NumSocClkLevelsEnabled;
break;
case SMU_VCLK:
case SMU_DCLK:
*count = clk_table->Vcn0ClkLevelsEnabled;
break;
case SMU_VCLK1:
case SMU_DCLK1:
*count = clk_table->Vcn1ClkLevelsEnabled;
break;
case SMU_MCLK:
*count = clk_table->NumMemPstatesEnabled;
break;
case SMU_FCLK:
*count = clk_table->NumFclkLevelsEnabled;
break;
default:
break;
}
return 0;
}
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@@ -840,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
return 0;
}
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -866,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@@ -940,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
msg_set_min = SMU_MSG_SetHardMinVcn;
msg_set_max = SMU_MSG_SetSoftMaxVcn;
msg_set_min = SMU_MSG_SetHardMinVcn0;
msg_set_max = SMU_MSG_SetSoftMaxVcn0;
break;
case SMU_VCLK1:
case SMU_DCLK1:
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@@ -971,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@@ -1000,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1067,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
smu->gfx_actual_hard_min_freq = 0;
smu->gfx_actual_soft_max_freq = 0;
return 0;
}
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1079,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
return 0;
}
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@@ -1095,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
uint8_t idx;
/* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */
for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0;
clock_table->SocClocks[idx].Vol = 0;
}
for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0;
clock_table->VPEClocks[idx].Vol = 0;
}
return 0;
}
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1114,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_14_0_0_get_dpm_table(smu, clock_table);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_14_0_1_get_dpm_table(smu, clock_table);
return 0;
}
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1135,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
.set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_14_0_0_get_dpm_table,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)