amd-drm-fixes-6.12-2024-11-14:

amdgpu:
 - PSR fix
 - Panel replay fixes
 - DML fix
 - vblank power fix
 - Fix video caps
 - SMU 14.0 fix
 - GPUVM fix
 - MES 12 fix
 - APU carve out fix
 - DC vbios fix
 - NBIO fix
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCZzYIpwAKCRC93/aFa7yZ
 2NVgAQDz3MsCT5vRhIkjBQ8VJqi/k23vvzoHRvBWE7JrfejmuQD+IvDPKmXvneQK
 M+3vSqDIbpmfwXEghHBdWXTMJqPZNg8=
 =UkPM
 -----END PGP SIGNATURE-----

Merge tag 'amd-drm-fixes-6.12-2024-11-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.12-2024-11-14:

amdgpu:
- PSR fix
- Panel replay fixes
- DML fix
- vblank power fix
- Fix video caps
- SMU 14.0 fix
- GPUVM fix
- MES 12 fix
- APU carve out fix
- DC vbios fix
- NBIO fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241114143401.448210-1-alexander.deucher@amd.com
commit 1eb0de899b
Dave Airlie 2024-11-15 06:48:49 +10:00
17 changed files with 117 additions and 96 deletions


@@ -161,7 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	 * When GTT is just an alternative to VRAM make sure that we
 	 * only use it as fallback and still try to fill up VRAM first.
 	 */
-	if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+	if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+	    !(adev->flags & AMD_IS_APU))
 		places[c].flags |= TTM_PL_FLAG_FALLBACK;
 	c++;
 }
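
Note on the hunk above: on APUs the "VRAM" pool is a carve-out of system memory, so demoting GTT to a second-choice fallback placement buys nothing there and only constrains the allocator. A minimal user-space sketch of the changed predicate (the flag values are stand-ins, not the kernel definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define DOMAIN_VRAM 0x1u   /* stand-in for AMDGPU_GEM_DOMAIN_VRAM */
    #define AMD_IS_APU  0x1u   /* stand-in for the adev->flags bit */

    /* GTT is marked as a fallback only when VRAM is both requested and
     * preferred, and the device is not an APU. */
    static bool gtt_is_fallback(unsigned int domain, unsigned int preferred,
                                unsigned int dev_flags)
    {
        return (domain & preferred & DOMAIN_VRAM) && !(dev_flags & AMD_IS_APU);
    }

    int main(void)
    {
        printf("dGPU: %d\n", gtt_is_fallback(DOMAIN_VRAM, DOMAIN_VRAM, 0));          /* 1 */
        printf("APU:  %d\n", gtt_is_fallback(DOMAIN_VRAM, DOMAIN_VRAM, AMD_IS_APU)); /* 0 */
        return 0;
    }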


@@ -1124,8 +1124,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 				uint64_t *flags)
 {
 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
-	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+	bool is_vram = bo->tbo.resource &&
+		bo->tbo.resource->mem_type == TTM_PL_VRAM;
+	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+				     AMDGPU_GEM_CREATE_EXT_COHERENT);
 	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
 	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
 	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
@@ -1133,6 +1135,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
 	bool snoop = false;
 	bool is_local;
 
+	dma_resv_assert_held(bo->tbo.base.resv);
+
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(9, 4, 1):
 	case IP_VERSION(9, 4, 2):
@@ -1251,9 +1255,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
 		*flags &= ~AMDGPU_PTE_VALID;
 	}
 
-	if (bo && bo->tbo.resource)
-		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
-					     mapping, flags);
+	if ((*flags & AMDGPU_PTE_VALID) && bo)
+		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
 }
 
 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
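
Note on the hunks above: the first change makes is_vram safe when the BO has no backing resource yet (the && short-circuits before the dereference), and the last one skips coherence-flag computation for mappings whose PTE has already been marked invalid. The short-circuit pattern in isolation, with stub types standing in for the TTM ones:

    #include <stdbool.h>
    #include <stddef.h>

    enum { TTM_PL_SYSTEM, TTM_PL_TT, TTM_PL_VRAM };  /* stand-in constants */
    struct ttm_resource { int mem_type; };

    /* NULL-safe: the right-hand side is never evaluated when res == NULL. */
    static bool bo_is_vram(const struct ttm_resource *res)
    {
        return res && res->mem_type == TTM_PL_VRAM;
    }

    int main(void)
    {
        struct ttm_resource vram = { TTM_PL_VRAM };
        return bo_is_vram(NULL) || !bo_is_vram(&vram);  /* exits 0 */
    }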


@@ -550,7 +550,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
 	mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
 	mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
 	mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
+	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
 
 	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
 			&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),


@@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
 
+	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	case IP_VERSION(7, 7, 0):
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+		break;
+	}
 }
 
 static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
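
Note on the hunk above: this is a plain read-modify-write that clears one strap bit, applied only to NBIO 7.7.0. The same operation as a self-contained model (register value passed in rather than read over MMIO):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Mirrors: data = RREG32(...) & ~BIT(23); WREG32(..., data); */
    static uint32_t clear_bit23(uint32_t reg)
    {
        return reg & ~BIT(23);
    }

    int main(void)
    {
        printf("0x%08x\n", clear_bit23(0xffffffffu)); /* 0xff7fffff */
        return 0;
    }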


@@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
 
 /* Navi */
 static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
 
 /* Sienna Cichlid */
 static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
 
 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
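
Note on the caps hunks (this file and the soc15/soc21/soc24/Polaris ones below): the encode limits are raised, e.g. H.264 from 4096x2304 to 4096x4096 and HEVC up to 8192x4352. codec_info_build() packs width/height plus a derived pixels-per-frame cap; a stand-alone model of the builder, with the field order assumed from the expanded Polaris table later in this same series:

    #include <stdint.h>

    struct video_codec_info {
        uint32_t codec_type;
        uint32_t max_width;
        uint32_t max_height;
        uint32_t max_pixels_per_frame;  /* derived: width * height */
        uint32_t max_level;
    };

    /* Sketch of the initializer macro (a model, not the kernel's macro). */
    #define codec_info_build(type, w, h, level) \
        { .codec_type = (type), .max_width = (w), .max_height = (h), \
          .max_pixels_per_frame = (uint32_t)(w) * (h), .max_level = (level) }

    static const struct video_codec_info demo[] = {
        codec_info_build(1, 4096, 4096, 0),  /* H.264 after this change */
        codec_info_build(2, 8192, 4352, 0),  /* HEVC after this change */
    };

    int main(void)
    {
        return demo[0].max_pixels_per_frame == 4096u * 4096u ? 0 : 1;
    }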


@@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
 
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_encode =


@@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
 
 /* SOC21 */
 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
@@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
 
 /* SRIOV SOC21, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {


@@ -48,7 +48,7 @@
 static const struct amd_ip_funcs soc24_common_ip_funcs;
 
 static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };


@@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
 	{
 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
 		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
 		.max_level = 0,
 	},
 	{
 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
 		.max_level = 0,
 	},
 };


@@ -6762,7 +6762,7 @@ create_stream_for_sink(struct drm_connector *connector,
 		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+		aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 	}
 
 finish:
@@ -8875,6 +8875,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
 	}
 }
 
+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+					  const struct dm_crtc_state *acrtc_state,
+					  const u64 current_ts)
+{
+	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+	struct amdgpu_dm_connector *aconn =
+		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+		if (pr->config.replay_supported && !pr->replay_feature_enabled)
+			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+			 !psr->psr_feature_enabled)
+			if (!aconn->disallow_edp_enter_psr)
+				amdgpu_dm_link_setup_psr(acrtc_state->stream);
+	}
+
+	/* Decrement skip count when SR is enabled and we're doing fast updates. */
+	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
+		if (aconn->sr_skip_count > 0)
+			aconn->sr_skip_count--;
+
+		/* Allow SR when skip count is 0. */
+		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+		/*
+		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
+		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
+		 * can be enabled immediately once OS demonstrates an
+		 * adequate number of fast atomic commits to notify KMD
+		 * of update events. See `vblank_control_worker()`.
+		 */
+		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+			if (pr->replay_feature_enabled && !pr->replay_allow_active)
+				amdgpu_dm_replay_enable(acrtc_state->stream, true);
+			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+				amdgpu_dm_psr_enable(acrtc_state->stream);
+		}
+	} else {
+		acrtc_attach->dm_irq_params.allow_sr_entry = false;
+	}
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
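
Note on the new helper: amdgpu_dm_enable_self_refresh() folds the PSR and Panel Replay entry policy into one place. Entry is gated on a countdown of consecutive fast (flip-only) commits: each fast update decrements sr_skip_count, and self-refresh entry is allowed only once it reaches zero; the counter is primed with AMDGPU_DM_PSR_ENTRY_DELAY when the stream is created (see the create_stream_for_sink hunk above). The gating logic in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    #define SR_ENTRY_DELAY 5  /* stand-in for AMDGPU_DM_PSR_ENTRY_DELAY */

    struct sr_gate {
        int sr_skip_count;
        bool allow_sr_entry;
    };

    /* Called for every fast update; in the real driver the counter is
     * primed at stream creation rather than here. */
    static void sr_gate_fast_update(struct sr_gate *g)
    {
        if (g->sr_skip_count > 0)
            g->sr_skip_count--;
        g->allow_sr_entry = (g->sr_skip_count == 0);
    }

    int main(void)
    {
        struct sr_gate g = { SR_ENTRY_DELAY, false };
        for (int i = 0; i < SR_ENTRY_DELAY; i++)
            sr_gate_fast_update(&g);
        printf("allow_sr_entry = %d\n", g.allow_sr_entry); /* 1 */
        return 0;
    }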
@@ -9028,7 +9078,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * during the PSR-SU was disabled.
 		 */
 		if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-		    acrtc_attach->dm_irq_params.allow_psr_entry &&
+		    acrtc_attach->dm_irq_params.allow_sr_entry &&
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 #endif
@@ -9203,9 +9253,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
 
 		mutex_lock(&dm->dc_lock);
-		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-		    acrtc_state->stream->link->psr_settings.psr_allow_active)
-			amdgpu_dm_psr_disable(acrtc_state->stream);
+		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+			if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+				amdgpu_dm_replay_disable(acrtc_state->stream);
+			if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_disable(acrtc_state->stream);
+		}
 		mutex_unlock(&dm->dc_lock);
 
 		/*
@@ -9246,57 +9299,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			dm_update_pflip_irq_state(drm_to_adev(dev),
 						  acrtc_attach);
 
-		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
-			if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
-			    !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
-				struct amdgpu_dm_connector *aconn =
-					(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
-				amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
-			} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-				   !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-				struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-					acrtc_state->stream->dm_stream_context;
-
-				if (!aconn->disallow_edp_enter_psr)
-					amdgpu_dm_link_setup_psr(acrtc_state->stream);
-			}
-		}
-
-		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
-		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
-		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-			struct amdgpu_dm_connector *aconn =
-				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
-			if (aconn->psr_skip_count > 0)
-				aconn->psr_skip_count--;
-
-			/* Allow PSR when skip count is 0. */
-			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
-
-			/*
-			 * If sink supports PSR SU, there is no need to rely on
-			 * a vblank event disable request to enable PSR. PSR SU
-			 * can be enabled immediately once OS demonstrates an
-			 * adequate number of fast atomic commits to notify KMD
-			 * of update events. See `vblank_control_worker()`.
-			 */
-			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_psr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
-			    !acrtc_state->stream->link->psr_settings.psr_allow_active &&
-			    !aconn->disallow_edp_enter_psr &&
-			    (timestamp_ns -
-			     acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
-			    500000000)
-				amdgpu_dm_psr_enable(acrtc_state->stream);
-		} else {
-			acrtc_attach->dm_irq_params.allow_psr_entry = false;
-		}
+		amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
 
 		mutex_unlock(&dm->dc_lock);
 	}
@@ -12080,7 +12083,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
 			break;
 	}
 
-	while (j < EDID_LENGTH) {
+	while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
 		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
 		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
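
Note on the while-loop fix: j indexes vendor blocks inside a 128-byte EDID extension, and the old bound allowed a block header starting near the end of the buffer to be read past it. Subtracting the block size from the bound keeps every dereference inside the buffer. The pattern, with a stub block layout (the real amd_vsdb_block is not reproduced here):

    #include <stddef.h>
    #include <stdint.h>

    #define EDID_LENGTH 128

    struct vsdb_block { uint8_t ieee_id[3]; uint8_t version; }; /* stub layout */

    /* A cast at offset j is safe only if the whole struct fits. */
    static int block_fits(size_t j)
    {
        return j < EDID_LENGTH - sizeof(struct vsdb_block);
    }

    int main(void)
    {
        /* offset 126 would leave only 2 of the 4 stub bytes in bounds */
        return block_fits(126) ? 1 : 0;  /* exits 0: correctly rejected */
    }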


@@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
 	/* Cached display modes */
 	struct drm_display_mode freesync_vid_base;
 
-	int psr_skip_count;
+	int sr_skip_count;
 	bool disallow_edp_enter_psr;
 
 	/* Record progress status of mst*/


@@ -266,11 +266,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 	 * where the SU region is the full hactive*vactive region. See
 	 * fill_dc_dirty_rects().
 	 */
-	if (vblank_work->stream && vblank_work->stream->link) {
+	if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
 		amdgpu_dm_crtc_set_panel_sr_feature(
 			vblank_work, vblank_work->enable,
-			vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
-			vblank_work->stream->link->replay_settings.replay_feature_enabled);
+			vblank_work->acrtc->dm_irq_params.allow_sr_entry);
 	}
 
 	if (dm->active_vblank_irq_count == 0) {


@@ -33,7 +33,7 @@ struct dm_irq_params {
 	struct mod_vrr_params vrr_params;
 	struct dc_stream_state *stream;
 	int active_planes;
-	bool allow_psr_entry;
+	bool allow_sr_entry;
 	struct mod_freesync_config freesync_config;
 
 #ifdef CONFIG_DEBUG_FS


@@ -3122,14 +3122,12 @@ static enum bp_result bios_parser_get_vram_info(
 	struct dc_vram_info *info)
 {
 	struct bios_parser *bp = BP_FROM_DCB(dcb);
-	static enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
 	struct atom_common_table_header *header;
 	struct atom_data_revision revision;
 
 	// vram info moved to umc_info for DCN4x
-	if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
-	    dcb->ctx->dce_version < DCN_VERSION_MAX &&
-	    info && DATA_TABLES(umc_info)) {
+	if (info && DATA_TABLES(umc_info)) {
 		header = GET_IMAGE(struct atom_common_table_header,
 				   DATA_TABLES(umc_info));
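
Note on the bios_parser_get_vram_info change: result was declared static, so it carried its value from one call into the next (and, being shared by all callers, was also a data race); after one bad table, later calls started from the stale failed state. Dropping static gives each call a fresh BP_RESULT_BADBIOSTABLE default. The hunk also drops the DCN4x-only version gate so any ASIC exposing a umc_info table takes that path. The static-local difference in miniature:

    #include <stdio.h>

    /* The bug pattern: a static local is initialized once, at the first
     * call, and keeps whatever it was last set to across calls. */
    static int next_with_static(void)
    {
        static int result = 0;
        return ++result;
    }

    /* The fix: an automatic local restarts from its initializer every call. */
    static int next_without_static(void)
    {
        int result = 0;
        return ++result;
    }

    int main(void)
    {
        printf("%d %d\n", next_with_static(), next_with_static());       /* 1 2 */
        printf("%d %d\n", next_without_static(), next_without_static()); /* 1 1 */
        return 0;
    }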


@@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
 	dc_state_copy_internal(new_state, src_state);
 
 #ifdef CONFIG_DRM_AMD_DC_FP
+	new_state->bw_ctx.dml2 = NULL;
+	new_state->bw_ctx.dml2_dc_power_source = NULL;
+
 	if (src_state->bw_ctx.dml2 &&
 	    !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
 		dc_state_release(new_state);
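
Note on the dc_state hunk: dc_state_copy_internal() presumably duplicates src_state into new_state wholesale, leaving new_state->bw_ctx.dml2 aliasing the source's object; if dml2_create_copy() then fails, the dc_state_release() error path would operate on a pointer the new state never owned. NULLing the copied pointers first makes that path safe. The general pattern under those assumptions (hypothetical ctx type):

    #include <stdlib.h>

    struct ctx { int *buf; };

    /* Shallow-copy then deep-copy, with the pointer reset in between so a
     * failed allocation never leaves dst aliasing src's buffer. */
    static int ctx_copy(struct ctx *dst, const struct ctx *src)
    {
        *dst = *src;        /* shallow copy: dst->buf aliases src->buf */
        dst->buf = NULL;    /* reset before the deep copy (the fix pattern) */
        if (src->buf) {
            dst->buf = malloc(sizeof(*dst->buf));
            if (!dst->buf)
                return -1;  /* safe: releasing dst won't free src->buf */
            *dst->buf = *src->buf;
        }
        return 0;
    }

    int main(void)
    {
        int v = 42;
        struct ctx src = { &v }, dst;
        int ret = ctx_copy(&dst, &src);
        free(dst.buf);
        return ret ? 1 : 0;
    }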


@@ -8,6 +8,7 @@
 #include "dml2_pmo_dcn4_fams2.h"
 
 static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
+static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
 
 static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
 	// VActive Preferred
@@ -2139,6 +2140,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
 	struct dml2_pmo_instance *pmo = in_out->instance;
 	bool stutter_period_meets_z8_eco = true;
 	bool z8_stutter_optimization_too_expensive = false;
+	bool stutter_optimization_too_expensive = false;
 	double line_time_us, vblank_nom_time_us;
 	unsigned int i;
 
@@ -2160,10 +2162,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
 		line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
 		vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
 
-		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
 			z8_stutter_optimization_too_expensive = true;
 			break;
 		}
+
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
+			stutter_optimization_too_expensive = true;
+			break;
+		}
 	}
 
 	pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
@@ -2179,7 +2186,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
 		pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
 	}
 
-	if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
+	if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
 		pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
 		pmo->scratch.pmo_dcn4.num_stutter_candidates++;
 	}


@@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, ret = 0, size = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 			break;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				break;
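
Note on the SMU 14.0 hunk: the fix remaps the loop index so SMU_MCLK levels are listed in ascending order, presumably because the firmware's DPM table reports memory-clock levels in the opposite order (the diff only shows the index flip, not the table layout). The index transform on its own:

    #include <stdint.h>
    #include <stdio.h>

    enum smu_clk_type { SMU_SCLK, SMU_MCLK };  /* stand-in subset */

    /* Mirrors: idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i */
    static uint32_t dpm_print_index(enum smu_clk_type t, uint32_t count, uint32_t i)
    {
        return (t == SMU_MCLK) ? (count - i - 1) : i;
    }

    int main(void)
    {
        for (uint32_t i = 0; i < 3; i++)
            printf("%u ", dpm_print_index(SMU_MCLK, 3, i)); /* 2 1 0 */
        printf("\n");
        return 0;
    }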