Merge tag 'amd-drm-fixes-6.0-2022-09-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-6.0-2022-09-29:

amdgpu:
- GC 11.x fixes
- SMU 13.x fixes
- DCN 3.1.4 fixes
- DCN 3.2.x fixes
- GC 9.x fix
- Fence fix
- SR-IOV suspend/resume fix
- PSR regression fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220929144003.8363-1-alexander.deucher@amd.com
commit 91462afa42
@@ -1050,6 +1050,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
 {
     if (adev->flags & AMD_IS_APU)
         return false;
 
+    if (amdgpu_sriov_vf(adev))
+        return false;
+
     return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
 }
 
@@ -3152,7 +3152,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
             continue;
         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
             adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
 
             r = adev->ip_blocks[i].version->funcs->resume(adev);
             if (r) {
@@ -4064,12 +4065,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
     struct amdgpu_device *adev = drm_to_adev(dev);
+    int r = 0;
 
     if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
         return 0;
 
     adev->in_suspend = true;
 
+    if (amdgpu_sriov_vf(adev)) {
+        amdgpu_virt_fini_data_exchange(adev);
+        r = amdgpu_virt_request_full_gpu(adev, false);
+        if (r)
+            return r;
+    }
+
     if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
         DRM_WARN("smart shift update failed\n");
 
@@ -4093,6 +4102,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
     amdgpu_device_ip_suspend_phase2(adev);
 
+    if (amdgpu_sriov_vf(adev))
+        amdgpu_virt_release_full_gpu(adev, false);
+
     return 0;
 }
 
@@ -4111,6 +4123,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
     struct amdgpu_device *adev = drm_to_adev(dev);
     int r = 0;
 
+    if (amdgpu_sriov_vf(adev)) {
+        r = amdgpu_virt_request_full_gpu(adev, true);
+        if (r)
+            return r;
+    }
+
     if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
         return 0;
 
@@ -4125,6 +4143,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
     }
 
     r = amdgpu_device_ip_resume(adev);
+
+    /* no matter what r is, always need to properly release full GPU */
+    if (amdgpu_sriov_vf(adev)) {
+        amdgpu_virt_init_data_exchange(adev);
+        amdgpu_virt_release_full_gpu(adev, true);
+    }
+
     if (r) {
         dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
         return r;
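Editor's note: the two hunks above pair amdgpu_virt_request_full_gpu() with amdgpu_virt_release_full_gpu() across suspend and resume, releasing on resume even when the IP resume step fails ("no matter what r is"). A minimal standalone sketch of that shape; the helper names below are illustrative stand-ins, not the amdgpu API:

#include <stdio.h>

/* Hypothetical stand-ins for amdgpu_virt_request/release_full_gpu(). */
static int request_full_gpu(void) { puts("host: exclusive access granted"); return 0; }
static void release_full_gpu(void) { puts("host: exclusive access released"); }
static int resume_ip_blocks(void) { return -5; /* pretend IP resume failed */ }

static int vf_resume(void)
{
    int r = request_full_gpu();

    if (r)
        return r;

    r = resume_ip_blocks();

    /* Always release, regardless of r, so the host is never left
     * believing the VF still owns the GPU. */
    release_full_gpu();

    return r;
}

int main(void)
{
    printf("vf_resume() = %d\n", vf_resume());
    return 0;
}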
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
     /* We are not protected by ring lock when reading the last sequence
      * but it's ok to report slightly wrong fence count here.
      */
-    amdgpu_fence_process(ring);
     emitted = 0x100000000ull;
     emitted -= atomic_read(&ring->fence_drv.last_seq);
     emitted += READ_ONCE(ring->fence_drv.sync_seq);
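Editor's note: the fix is dropping the amdgpu_fence_process() call so that counting emitted fences has no side effects; what remains is a wraparound-safe difference of two 32-bit sequence numbers, biased by 2^32 so the 64-bit intermediate stays positive. A standalone sketch of that arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t count_emitted(uint32_t last_seq, uint32_t sync_seq)
{
    uint64_t emitted = 0x100000000ull; /* 2^32 bias keeps the value positive */

    emitted -= last_seq;      /* newest sequence the hardware signaled */
    emitted += sync_seq;      /* newest sequence the driver emitted */
    return (uint32_t)emitted; /* == (sync_seq - last_seq) mod 2^32 */
}

int main(void)
{
    /* 10 fences outstanding, no wrap */
    printf("%u\n", count_emitted(100, 110));
    /* still 10 fences outstanding across the 32-bit wrap point */
    printf("%u\n", count_emitted(0xfffffffb, 5));
    return 0;
}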
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
     uint64_t tba_addr;
     uint64_t tma_addr;
     uint32_t is_kfd_process;
+    uint32_t is_aql_queue;
+    uint32_t queue_size;
 };
 
 struct mes_remove_queue_input {
@@ -5260,6 +5260,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
     u32 reg, data;
 
+    amdgpu_gfx_off_ctrl(adev, false);
+
     reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
     if (amdgpu_sriov_is_pp_one_vf(adev))
         data = RREG32_NO_KIQ(reg);
@@ -5273,6 +5275,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
         WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
     else
         WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+    amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -5597,7 +5597,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
     BUG_ON(offset > ring->buf_mask);
     BUG_ON(ring->ring[offset] != 0x55aa55aa);
 
-    cur = (ring->wptr & ring->buf_mask) - 1;
+    cur = (ring->wptr - 1) & ring->buf_mask;
     if (likely(cur > offset))
         ring->ring[offset] = cur - offset;
     else
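Editor's note: the one-line change matters when the write pointer sits exactly on a ring-size boundary: masking before the decrement underflows to an out-of-range index, while decrementing first wraps to the last valid slot. A standalone demonstration with an assumed 8-entry ring (not the GFX9 ring sizes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t buf_mask = 7; /* power-of-two ring of 8 entries */
    uint64_t wptr = 8;           /* write pointer just wrapped around */

    uint64_t bad = (wptr & buf_mask) - 1;  /* (0) - 1 -> 0xffffffffffffffff */
    uint64_t good = (wptr - 1) & buf_mask; /* 7, the last valid slot */

    printf("bad index:  0x%llx\n", (unsigned long long)bad);
    printf("good index: %llu\n", (unsigned long long)good);
    return 0;
}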
@@ -185,6 +185,10 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
     mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
     mes_add_queue_pkt.trap_en = 1;
 
+    /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+    mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+    mes_add_queue_pkt.gds_size = input->queue_size;
+
     return mes_v11_0_submit_pkt_and_poll_completion(mes,
             &mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
             offsetof(union MESAPI__ADD_QUEUE, api_status));
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
     }
 
     queue_input.is_kfd_process = 1;
+    queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+    queue_input.queue_size = q->properties.queue_size >> 2;
 
     queue_input.paging = false;
     queue_input.tba_addr = qpd->tba_addr;
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
             print_sq_intr_info_inst(context_id0, context_id1);
             sq_int_priv = REG_GET_FIELD(context_id0,
                     SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
-            if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+            /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
                     KFD_CTXID0_DOORBELL_ID(context_id0),
                     KFD_CTXID0_TRAP_CODE(context_id0),
-                    NULL, 0))*/)
-                return;
+                    NULL, 0)))
+                return;*/
             break;
         case SQ_INTERRUPT_WORD_ENCODING_ERROR:
             print_sq_intr_info_error(context_id0, context_id1);
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
     m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
     m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
     m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+    m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+    m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+    m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+    m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
 
     m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
             0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
                        &stream, 1,
                        &params);
 
-    power_opt |= psr_power_opt_z10_static_screen;
+    /*
+     * Only enable static-screen optimizations for PSR1. For PSR SU, this
+     * causes vstartup interrupt issues, used by amdgpu_dm to send vblank
+     * events.
+     */
+    if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+        power_opt |= psr_power_opt_z10_static_screen;
 
     return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
 }
@@ -130,11 +130,20 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
         if (pipe->top_pipe || pipe->prev_odm_pipe)
             continue;
         if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+            struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
             if (disable) {
+                if (stream_enc && stream_enc->funcs->disable_fifo)
+                    pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
                 pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                 reset_sync_context_for_pipe(dc, context, i);
-            } else
+            } else {
                 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+                if (stream_enc && stream_enc->funcs->enable_fifo)
+                    pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+            }
         }
     }
 }
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 {
     struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
     unsigned int num_levels;
+    unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
 
     memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
     clk_mgr_base->clks.p_state_change_support = true;
     clk_mgr_base->clks.prev_p_state_change_support = true;
     clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
     clk_mgr->smu_present = false;
+    clk_mgr->dpm_present = false;
 
     if (!clk_mgr_base->bw_params)
         return;
@@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
     dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
             &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
             &num_levels);
+    num_dcfclk_levels = num_levels;
 
     /* SOCCLK */
     dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
@@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
     dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
             &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
             &num_levels);
+    num_dtbclk_levels = num_levels;
 
     /* DISPCLK */
     dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
             &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
             &num_levels);
+    num_dispclk_levels = num_levels;
+
+    if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+        clk_mgr->dpm_present = true;
 
     if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
         unsigned int i;
@@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
             &num_levels);
     clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
 
+    if (clk_mgr->dpm_present && !num_levels)
+        clk_mgr->dpm_present = false;
+
+    if (!clk_mgr->dpm_present)
+        dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
     DC_FP_START();
     /* Refresh bounding box */
     clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
@@ -2164,8 +2164,7 @@ static void dce110_setup_audio_dto(
             continue;
         if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
             continue;
-        if (pipe_ctx->stream_res.audio != NULL &&
-            pipe_ctx->stream_res.audio->enabled == false) {
+        if (pipe_ctx->stream_res.audio != NULL) {
             struct audio_output audio_output;
 
             build_audio_output(context, pipe_ctx, &audio_output);
@@ -2205,8 +2204,7 @@ static void dce110_setup_audio_dto(
         if (!dc_is_dp_signal(pipe_ctx->stream->signal))
             continue;
 
-        if (pipe_ctx->stream_res.audio != NULL &&
-            pipe_ctx->stream_res.audio->enabled == false) {
+        if (pipe_ctx->stream_res.audio != NULL) {
             struct audio_output audio_output;
 
             build_audio_output(context, pipe_ctx, &audio_output);
@@ -45,6 +45,48 @@
 #define DC_LOGGER \
     dccg->ctx->logger
 
+static void dccg314_get_pixel_rate_div(
+        struct dccg *dccg,
+        uint32_t otg_inst,
+        enum pixel_rate_div *k1,
+        enum pixel_rate_div *k2)
+{
+    struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+    uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+    *k1 = PIXEL_RATE_DIV_NA;
+    *k2 = PIXEL_RATE_DIV_NA;
+
+    switch (otg_inst) {
+    case 0:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG0_PIXEL_RATE_DIVK1, &val_k1,
+            OTG0_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 1:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG1_PIXEL_RATE_DIVK1, &val_k1,
+            OTG1_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 2:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG2_PIXEL_RATE_DIVK1, &val_k1,
+            OTG2_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 3:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG3_PIXEL_RATE_DIVK1, &val_k1,
+            OTG3_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    default:
+        BREAK_TO_DEBUGGER();
+        return;
+    }
+
+    *k1 = (enum pixel_rate_div)val_k1;
+    *k2 = (enum pixel_rate_div)val_k2;
+}
+
 static void dccg314_set_pixel_rate_div(
         struct dccg *dccg,
         uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
         enum pixel_rate_div k2)
 {
     struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+    enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
 
+    dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+    if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+        return;
+
     switch (otg_inst) {
     case 0:
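Editor's note: both DCCG hunks (this one and the DCN32 copy further down) add the same guard: read back the divider fields currently programmed and return early when the request is the PIXEL_RATE_DIV_NA sentinel or already in effect, so an active pixel clock is never reprogrammed redundantly. A standalone sketch of the pattern; the register field packing below is invented for the demo, only the guard logic mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define PIXEL_RATE_DIV_NA 0xF /* sentinel: wider than the 1/2-bit fields */

static uint32_t otg_pixel_rate_div; /* stand-in for the OTG_PIXEL_RATE_DIV register */

static void get_div(uint32_t *k1, uint32_t *k2)
{
    *k1 = otg_pixel_rate_div & 0x1;        /* 1-bit K1 field (assumed layout) */
    *k2 = (otg_pixel_rate_div >> 1) & 0x3; /* 2-bit K2 field (assumed layout) */
}

static void set_div(uint32_t k1, uint32_t k2)
{
    uint32_t cur_k1, cur_k2;

    get_div(&cur_k1, &cur_k2);
    if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA ||
        (k1 == cur_k1 && k2 == cur_k2))
        return; /* invalid request, or nothing to do */

    otg_pixel_rate_div = (k1 & 0x1) | ((k2 & 0x3) << 1);
    printf("programmed k1=%u k2=%u\n", k1, k2);
}

int main(void)
{
    set_div(1, 2);                 /* programs the register */
    set_div(1, 2);                 /* skipped: already in effect */
    set_div(PIXEL_RATE_DIV_NA, 2); /* skipped: invalid sentinel */
    return 0;
}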
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
 
     /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
     REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
-    REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
     REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
     REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
     REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -261,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
     return two_pix;
 }
 
+void enc314_stream_encoder_dp_blank(
+    struct dc_link *link,
+    struct stream_encoder *enc)
+{
+    /* New to DCN314 - disable the FIFO before VID stream disable. */
+    enc314_disable_fifo(enc);
+
+    enc1_stream_encoder_dp_blank(link, enc);
+}
+
 static void enc314_stream_encoder_dp_unblank(
         struct dc_link *link,
         struct stream_encoder *enc,
@@ -316,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank(
     /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
      * that it overflows during mode transition, and sometimes doesn't recover.
      */
-    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
     REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
     udelay(10);
 
     REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
 
-    /* DIG Resync FIFO now needs to be explicitly enabled. */
-    enc314_enable_fifo(enc);
-
     /* wait 100us for DIG/DP logic to prime
      * (i.e. a few video lines)
      */
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
 
     REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
 
+    /*
+     * DIG Resync FIFO now needs to be explicitly enabled.
+     * This should come after DP_VID_STREAM_ENABLE per HW docs.
+     */
+    enc314_enable_fifo(enc);
+
     dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
 }
 
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
     .stop_dp_info_packets =
         enc1_stream_encoder_stop_dp_info_packets,
     .dp_blank =
-        enc1_stream_encoder_dp_blank,
+        enc314_stream_encoder_dp_blank,
     .dp_unblank =
         enc314_stream_encoder_dp_unblank,
     .audio_mute_control = enc3_audio_mute_control,
@@ -42,6 +42,48 @@
 #define DC_LOGGER \
     dccg->ctx->logger
 
+static void dccg32_get_pixel_rate_div(
+        struct dccg *dccg,
+        uint32_t otg_inst,
+        enum pixel_rate_div *k1,
+        enum pixel_rate_div *k2)
+{
+    struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+    uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+    *k1 = PIXEL_RATE_DIV_NA;
+    *k2 = PIXEL_RATE_DIV_NA;
+
+    switch (otg_inst) {
+    case 0:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG0_PIXEL_RATE_DIVK1, &val_k1,
+            OTG0_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 1:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG1_PIXEL_RATE_DIVK1, &val_k1,
+            OTG1_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 2:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG2_PIXEL_RATE_DIVK1, &val_k1,
+            OTG2_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    case 3:
+        REG_GET_2(OTG_PIXEL_RATE_DIV,
+            OTG3_PIXEL_RATE_DIVK1, &val_k1,
+            OTG3_PIXEL_RATE_DIVK2, &val_k2);
+        break;
+    default:
+        BREAK_TO_DEBUGGER();
+        return;
+    }
+
+    *k1 = (enum pixel_rate_div)val_k1;
+    *k2 = (enum pixel_rate_div)val_k2;
+}
+
 static void dccg32_set_pixel_rate_div(
         struct dccg *dccg,
         uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
 {
     struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 
+    enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+    // Don't program 0xF into the register field. Not valid since
+    // K1 / K2 field is only 1 / 2 bits wide
+    if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+        return;
+
+    dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+    if (k1 == cur_k1 && k2 == cur_k2)
+        return;
+
     switch (otg_inst) {
     case 0:
         REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
     default:
         break;
     }
-    /* Should never be hit, if it is we have an erroneous hw config*/
-    ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
-            + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+    if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+            + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+        /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
+        DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+                hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+                hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+    }
 }
 
 static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
         },
     },
     .num_states = 1,
-    .sr_exit_time_us = 20.16,
-    .sr_enter_plus_exit_time_us = 27.13,
+    .sr_exit_time_us = 42.97,
+    .sr_enter_plus_exit_time_us = 49.94,
     .sr_exit_z8_time_us = 285.0,
     .sr_enter_plus_exit_z8_time_us = 320,
     .writeback_latency_us = 12.0,
@@ -1926,6 +1926,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
     memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
 }
 
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+    int i;
+    unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+            max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+    for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+        if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+            max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+        if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+            max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+        if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+            max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+        if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+            max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+        if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+            max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+        if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+            max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+        if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+            max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+    }
+
+    /* Scan through clock values we currently have and if they are 0,
+     * then populate it with dcn3_2_soc.clock_limits[] value.
+     *
+     * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK as any of those being
+     * 0, will cause it to skip building the clock table.
+     */
+    if (max_dcfclk_mhz == 0)
+        bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+    if (max_dispclk_mhz == 0)
+        bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+    if (max_dtbclk_mhz == 0)
+        bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+    if (max_uclk_mhz == 0)
+        bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
 static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
         struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
 {
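Editor's note: dcn32_patch_dpm_table() is the fallback invoked by the clk_mgr hunk earlier when the SMU reports no usable DPM levels: find each clock's maximum across the table, then backfill level 0 with a bounding-box default wherever the maximum came back as 0, so the clock-table build is not skipped. A condensed standalone sketch with two clocks only and made-up default values:

#include <stdio.h>

#define MAX_NUM_DPM_LVL 8

struct entry { unsigned int dcfclk_mhz, dispclk_mhz; };

static const struct entry defaults = { 1564, 2150 }; /* stand-in for dcn3_2_soc limits */

static void patch_dpm_table(struct entry *tbl)
{
    unsigned int max_dcfclk = 0, max_dispclk = 0;
    int i;

    for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
        if (tbl[i].dcfclk_mhz > max_dcfclk)
            max_dcfclk = tbl[i].dcfclk_mhz;
        if (tbl[i].dispclk_mhz > max_dispclk)
            max_dispclk = tbl[i].dispclk_mhz;
    }

    /* A zero maximum means the SMU reported nothing usable: backfill
     * level 0 with the default so later code sees a non-empty table. */
    if (max_dcfclk == 0)
        tbl[0].dcfclk_mhz = defaults.dcfclk_mhz;
    if (max_dispclk == 0)
        tbl[0].dispclk_mhz = defaults.dispclk_mhz;
}

int main(void)
{
    struct entry tbl[MAX_NUM_DPM_LVL] = { { 0, 1800 } }; /* dcfclk missing */

    patch_dpm_table(tbl);
    printf("level0: dcfclk=%u dispclk=%u\n", tbl[0].dcfclk_mhz, tbl[0].dispclk_mhz);
    return 0;
}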
@@ -77,4 +77,6 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
             int pipe_cnt,
             int vlevel);
 
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
 #endif
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
     bool smu_present;
     void *wm_range_table;
     long long wm_range_table_addr;
+
+    bool dpm_present;
 };
 
 struct clk_mgr_internal_funcs {
@@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE {
             uint32_t map_kiq_utility_queue : 1;
             uint32_t is_kfd_process : 1;
             uint32_t trap_en : 1;
-            uint32_t reserved : 21;
+            uint32_t is_aql_queue : 1;
+            uint32_t reserved : 20;
         };
         struct MES_API_STATUS api_status;
         uint64_t tma_addr;
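Editor's note: the new 1-bit is_aql_queue flag is paid for by shrinking reserved from 21 to 20 bits, so the flags word of the MES packet stays exactly 32 bits and the firmware ABI layout is preserved. A standalone check of that bookkeeping; other_flags merely stands in for the earlier bits, the real union has more fields:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct flags_v1 { /* before the patch: used bits + 21 reserved */
    uint32_t other_flags : 10;
    uint32_t trap_en : 1;
    uint32_t reserved : 21;
};

struct flags_v2 { /* after the patch: one new bit, reserved 21 -> 20 */
    uint32_t other_flags : 10;
    uint32_t trap_en : 1;
    uint32_t is_aql_queue : 1;
    uint32_t reserved : 20;
};

int main(void)
{
    /* Both layouts still occupy a single 32-bit word. */
    assert(sizeof(struct flags_v1) == sizeof(uint32_t));
    assert(sizeof(struct flags_v2) == sizeof(uint32_t));
    printf("flags word: %zu bytes in both versions\n", sizeof(struct flags_v2));
    return 0;
}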
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
                       uint32_t *feature_mask, uint32_t num)
 {
     struct amdgpu_device *adev = smu->adev;
+    u32 smu_version;
 
     if (num > 2)
         return -EINVAL;
 
-    memset(feature_mask, 0, sizeof(uint32_t) * num);
+    memset(feature_mask, 0xff, sizeof(uint32_t) * num);
 
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
-
-    if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+    if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
     }
 
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
+    if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+        !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
 
-    if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
-        (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+    if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
 
-    if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+    /* PMFW 78.58 contains a critical fix for gfxoff feature */
+    smu_cmn_get_smc_version(smu, NULL, &smu_version);
+    if ((smu_version < 0x004e3a00) ||
+        !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
 
-#if 0
-    if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
-    if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+    if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
     }
 
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-
-    if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
-    if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+    if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+    if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
     }
 
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
-    *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
-    if (adev->pm.pp_feature & PP_ULV_MASK)
-        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+    if (!(adev->pm.pp_feature & PP_ULV_MASK))
+        *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
 
     return 0;
 }
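Editor's note: the rework above flips the feature mask from opt-in (start at zero, set each supported bit) to opt-out (start from all-ones, clear only what is unsupported or disabled), which is why the function shrinks from 82 lines to 47: always-on features no longer need a line each. A standalone sketch of the pattern with made-up bit positions and flag values; only the 0x004e3a00 (78.58) firmware gate is taken from the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_MASK(bit) (1ull << (bit))
#define FEATURE_DPM_GFXCLK_BIT 1 /* illustrative bit positions */
#define FEATURE_GFXOFF_BIT 2

#define PP_SCLK_DPM_MASK (1u << 0) /* illustrative flag values */
#define PP_GFXOFF_MASK (1u << 1)

static uint64_t allowed_features(uint32_t pp_feature, uint32_t fw_version)
{
    uint64_t mask = ~0ull; /* opt-out: everything allowed by default */

    if (!(pp_feature & PP_SCLK_DPM_MASK))
        mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

    /* Mirror of the PMFW gate in the patch: gfxoff also requires a
     * minimum firmware version (78.58, i.e. 0x004e3a00). */
    if (fw_version < 0x004e3a00 || !(pp_feature & PP_GFXOFF_MASK))
        mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

    return mask;
}

int main(void)
{
    printf("mask = 0x%016" PRIx64 "\n",
           allowed_features(PP_SCLK_DPM_MASK, 0x004e3a00));
    return 0;
}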