Merge tag 'drm-fixes-2024-11-16' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Final week of fixes, lots of small amdgpu fixes, some i915 and xe
  fixes, the nouveau changes fix a recent regression and some laptop
  panel black screens, then a couple of other misc ones.

  It's probably a little busier than I'd like, but each fix seems fine.

  amdgpu:
   - PSR fix
   - Panel replay fixes
   - DML fix
   - vblank power fix
   - Fix video caps
   - SMU 14.0 fix
   - GPUVM fix
   - MES 12 fix
   - APU carve out fix
   - DC vbios fix
   - NBIO fix

  i915:
   - Don't load GSC on ARL-H and ARL-U if the firmware is too old
   - Avoid potential OOPS in enabling/disabling TV output

  xe:
   - Fix unlock on exec ioctl error path
   - Fix hibernation on LNL due to GGTT mappings getting lost
   - Fix missing runtime PM in OA release

  bridge:
   - tc358768: Fix DSI command tx

  nouveau:
   - Fix GSP AUX error handling
   - dp: Handle retries for AUX CH transfers with GSP
   - fw: Sync DMA after setup

  panthor:
   - Fix partial BO mappings to GPU

  rockchip:
   - vop: Avoid null-ptr deref in plane-state check

  vmwgfx:
   - Avoid null-ptr deref in surface creation"

* tag 'drm-fixes-2024-11-16' of https://gitlab.freedesktop.org/drm/kernel: (27 commits)
  drm/bridge: tc358768: Fix DSI command tx
  drm/vmwgfx: avoid null_ptr_deref in vmw_framebuffer_surface_create_handle
  nouveau/dp: handle retries for AUX CH transfers with GSP.
  nouveau: handle EBUSY and EAGAIN for GSP aux errors.
  nouveau: fw: sync dma after setup is called.
  drm/xe/oa: Fix "Missing outer runtime PM protection" warning
  drm/xe: handle flat ccs during hibernation on igpu
  drm/xe: improve hibernation on igpu
  drm/xe: Restore system memory GGTT mappings
  drm/xe: Ensure all locks released in exec IOCTL
  drm/panthor: Fix handling of partial GPU mapping of BOs
  drm/amd: Fix initialization mistake for NBIO 7.7.0
  Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
  drm/amd/display: Fix failure to read vram info due to static BP_RESULT
  drm/amdgpu: enable GTT fallback handling for dGPUs only
  drm/i915: Grab intel_display from the encoder to avoid potential oopsies
  drm/i915/gsc: ARL-H and ARL-U need a newer GSC FW.
  drm/amdgpu/mes12: correct kiq unmap latency
  drm/amdgpu: fix check in gmc_v9_0_get_vm_pte()
  drm/amd/pm: print pp_dpm_mclk in ascending order on SMU v14.0.0
  ...
Linus Torvalds 2024-11-15 10:53:42 -08:00
commit f868cd2517
34 changed files with 305 additions and 197 deletions


@ -161,7 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
!(adev->flags & AMD_IS_APU))
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}


@ -1124,8 +1124,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
bool is_vram = bo->tbo.resource &&
bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
@ -1133,6 +1135,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool snoop = false;
bool is_local;
dma_resv_assert_held(bo->tbo.base.resv);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
@ -1251,9 +1255,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
if (bo && bo->tbo.resource)
gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
mapping, flags);
if ((*flags & AMDGPU_PTE_VALID) && bo)
gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,


@ -550,7 +550,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),


@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(7, 7, 0):
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
break;
}
}
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,


@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {


@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =


@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
/* SRIOV SOC21, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {


@ -48,7 +48,7 @@
static const struct amd_ip_funcs soc24_common_ip_funcs;
static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};


@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
};


@ -6762,7 +6762,7 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
@ -8875,6 +8875,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
}
}
static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
const struct dm_crtc_state *acrtc_state,
const u64 current_ts)
{
struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!psr->psr_feature_enabled)
if (!aconn->disallow_edp_enter_psr)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
}
/* Decrement skip count when SR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
(psr->psr_feature_enabled || pr->config.replay_supported)) {
if (aconn->sr_skip_count > 0)
aconn->sr_skip_count--;
/* Allow SR when skip count is 0. */
acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
/*
* If sink supports PSR SU/Panel Replay, there is no need to rely on
* a vblank event disable request to enable PSR/RP. PSR SU/RP
* can be enabled immediately once OS demonstrates an
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
if (acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
} else {
acrtc_attach->dm_irq_params.allow_sr_entry = false;
}
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
@ -9028,7 +9078,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* during the PSR-SU was disabled.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
acrtc_attach->dm_irq_params.allow_psr_entry &&
acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
@ -9203,9 +9253,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
}
mutex_unlock(&dm->dc_lock);
/*
@ -9246,57 +9299,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
!acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
acrtc_state->stream->dm_stream_context;
if (!aconn->disallow_edp_enter_psr)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
}
}
/* Decrement skip count when PSR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
if (aconn->psr_skip_count > 0)
aconn->psr_skip_count--;
/* Allow PSR when skip count is 0. */
acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
/*
* If sink supports PSR SU, there is no need to rely on
* a vblank event disable request to enable PSR. PSR SU
* can be enabled immediately once OS demonstrates an
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
acrtc_attach->dm_irq_params.allow_psr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
!acrtc_state->stream->link->psr_settings.psr_allow_active &&
!aconn->disallow_edp_enter_psr &&
(timestamp_ns -
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
500000000)
amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
}
amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
mutex_unlock(&dm->dc_lock);
}
@ -12080,7 +12083,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
break;
}
while (j < EDID_LENGTH) {
while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);


@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
/* Cached display modes */
struct drm_display_mode freesync_vid_base;
int psr_skip_count;
int sr_skip_count;
bool disallow_edp_enter_psr;
/* Record progress status of mst*/


@ -266,11 +266,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* where the SU region is the full hactive*vactive region. See
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
amdgpu_dm_crtc_set_panel_sr_feature(
vblank_work, vblank_work->enable,
vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
vblank_work->stream->link->replay_settings.replay_feature_enabled);
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
if (dm->active_vblank_irq_count == 0) {


@ -33,7 +33,7 @@ struct dm_irq_params {
struct mod_vrr_params vrr_params;
struct dc_stream_state *stream;
int active_planes;
bool allow_psr_entry;
bool allow_sr_entry;
struct mod_freesync_config freesync_config;
#ifdef CONFIG_DEBUG_FS


@ -3122,14 +3122,12 @@ static enum bp_result bios_parser_get_vram_info(
struct dc_vram_info *info)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
static enum bp_result result = BP_RESULT_BADBIOSTABLE;
enum bp_result result = BP_RESULT_BADBIOSTABLE;
struct atom_common_table_header *header;
struct atom_data_revision revision;
// vram info moved to umc_info for DCN4x
if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
dcb->ctx->dce_version < DCN_VERSION_MAX &&
info && DATA_TABLES(umc_info)) {
if (info && DATA_TABLES(umc_info)) {
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(umc_info));


@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_copy_internal(new_state, src_state);
#ifdef CONFIG_DRM_AMD_DC_FP
new_state->bw_ctx.dml2 = NULL;
new_state->bw_ctx.dml2_dc_power_source = NULL;
if (src_state->bw_ctx.dml2 &&
!dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
dc_state_release(new_state);


@ -8,6 +8,7 @@
#include "dml2_pmo_dcn4_fams2.h"
static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
@ -2139,6 +2140,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
struct dml2_pmo_instance *pmo = in_out->instance;
bool stutter_period_meets_z8_eco = true;
bool z8_stutter_optimization_too_expensive = false;
bool stutter_optimization_too_expensive = false;
double line_time_us, vblank_nom_time_us;
unsigned int i;
@ -2160,10 +2162,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
z8_stutter_optimization_too_expensive = true;
break;
}
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
stutter_optimization_too_expensive = true;
break;
}
}
pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
@ -2179,7 +2186,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
}
if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
pmo->scratch.pmo_dcn4.num_stutter_candidates++;
}


@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
int i, idx, ret = 0, size = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;


@ -125,6 +125,9 @@
#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
/* TC358768_DSICMD_TX (0x0600) register */
#define TC358768_DSI_CMDTX_DC_START BIT(0)
static const char * const tc358768_supplies[] = {
"vddc", "vddmipi", "vddio"
};
@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
tc358768_write(priv, reg, tmp);
}
static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
{
u32 val;
/* start transfer */
tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
if (priv->error)
return;
/* wait transfer completion */
priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
(val & TC358768_DSI_CMDTX_DC_START) == 0,
100, 100000);
}
static int tc358768_sw_reset(struct tc358768_priv *priv)
{
/* Assert Reset */
@ -516,8 +534,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
}
}
/* start transfer */
tc358768_write(priv, TC358768_DSICMD_TX, 1);
tc358768_dsicmd_tx(priv);
ret = tc358768_clear_error(priv);
if (ret)


@ -928,7 +928,7 @@ intel_enable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(state);
struct intel_display *display = to_intel_display(encoder);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
@ -942,7 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(state);
struct intel_display *display = to_intel_display(encoder);
intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}


@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
struct intel_uc_fw_ver min_ver = { 0 };
size_t min_size = sizeof(*layout);
int i;
@ -212,33 +213,46 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
}
}
if (IS_ARROWLAKE(gt->i915)) {
/*
* ARL SKUs require newer firmwares, but the blob is actually common
* across all MTL and ARL SKUs, so we need to do an explicit version check
* here rather than using a separate table entry. If a too old version
* is found, then just don't use GSC rather than aborting the driver load.
* Note that the major number in the GSC FW version is used to indicate
* the platform, so we expect it to always be 102 for MTL/ARL binaries.
*/
if (IS_ARROWLAKE_S(gt->i915))
min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
return -EINVAL;
}
if (min_ver.major) {
bool too_old = false;
/*
* ARL requires a newer firmware than MTL did (102.0.10.1878) but the
* firmware is actually common. So, need to do an explicit version check
* here rather than using a separate table entry. And if the older
* MTL-only version is found, then just don't use GSC rather than aborting
* the driver load.
*/
if (gsc->release.major < 102) {
if (gsc->release.minor < min_ver.minor) {
too_old = true;
} else if (gsc->release.major == 102) {
if (gsc->release.minor == 0) {
if (gsc->release.patch < 10) {
} else if (gsc->release.minor == min_ver.minor) {
if (gsc->release.patch < min_ver.patch) {
too_old = true;
} else if (gsc->release.patch == min_ver.patch) {
if (gsc->release.build < min_ver.build)
too_old = true;
} else if (gsc->release.patch == 10) {
if (gsc->release.build < 1878)
too_old = true;
}
}
}
if (too_old) {
gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
gsc->release.patch, gsc->release.build,
min_ver.major, min_ver.minor,
min_ver.patch, min_ver.build);
return -EINVAL;
}
}


@ -540,8 +540,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
#define IS_ARROWLAKE(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
#define IS_ARROWLAKE_H(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
#define IS_ARROWLAKE_U(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
#define IS_ARROWLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \


@ -200,8 +200,16 @@ static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(ID),
};
static const u16 subplatform_arl_ids[] = {
INTEL_ARL_IDS(ID),
static const u16 subplatform_arl_h_ids[] = {
INTEL_ARL_H_IDS(ID),
};
static const u16 subplatform_arl_u_ids[] = {
INTEL_ARL_U_IDS(ID),
};
static const u16 subplatform_arl_s_ids[] = {
INTEL_ARL_S_IDS(ID),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@ -261,9 +269,15 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
} else if (find_devid(devid, subplatform_arl_ids,
ARRAY_SIZE(subplatform_arl_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ARL);
} else if (find_devid(devid, subplatform_arl_h_ids,
ARRAY_SIZE(subplatform_arl_h_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ARL_H);
} else if (find_devid(devid, subplatform_arl_u_ids,
ARRAY_SIZE(subplatform_arl_u_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ARL_U);
} else if (find_devid(devid, subplatform_arl_s_ids,
ARRAY_SIZE(subplatform_arl_s_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);


@ -128,7 +128,9 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
#define INTEL_SUBPLATFORM_ARL 0
#define INTEL_SUBPLATFORM_ARL_H 0
#define INTEL_SUBPLATFORM_ARL_U 1
#define INTEL_SUBPLATFORM_ARL_S 2
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,


@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
ctrl->data = data;
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret == -EAGAIN && ctrl->retryTimeMs) {
if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
/*
* Device (likely an eDP panel) isn't ready yet, wait for the time specified
* by GSP before retrying again
@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
u8 size = *psize;
int ret;
int retries;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
for (retries = 0; retries < 3; ++retries) {
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
ctrl->bAddrOnly = !size;
ctrl->cmd = type;
if (ctrl->bAddrOnly) {
ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
ctrl->bAddrOnly = !size;
ctrl->cmd = type;
if (ctrl->bAddrOnly) {
ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
}
ctrl->addr = addr;
ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
memcpy(ctrl->data, data, size);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
/*
* Device (likely an eDP panel) isn't ready yet, wait for the time specified
* by GSP before retrying again
*/
nvkm_debug(&disp->engine.subdev,
"Waiting %dms for GSP LT panel delay before retrying in AUX\n",
ctrl->retryTimeMs);
msleep(ctrl->retryTimeMs);
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
} else {
memcpy(data, ctrl->data, size);
*psize = ctrl->size;
ret = ctrl->replyType;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
break;
}
}
ctrl->addr = addr;
ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
memcpy(ctrl->data, data, size);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret) {
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
memcpy(data, ctrl->data, size);
*psize = ctrl->size;
ret = ctrl->replyType;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}


@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
nvkm_falcon_fw_dtor_sigs(fw);
}
/* after last write to the img, sync dma mappings */
dma_sync_single_for_device(fw->fw.device->dev,
fw->fw.phys,
sg_dma_len(&fw->fw.mem.sgl),
DMA_TO_DEVICE);
FLCNFW_DBG(fw, "resetting");
fw->func->reset(fw);
@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
goto done;
}
/* after last write to the img, sync dma mappings */
dma_sync_single_for_device(fw->fw.device->dev,
fw->fw.phys,
sg_dma_len(&fw->fw.mem.sgl),
DMA_TO_DEVICE);
ret = fw->func->load(fw);
if (ret)
goto done;


@ -78,7 +78,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
switch (rpc_status) {
case 0x55: /* NV_ERR_NOT_READY */
case 0x66: /* NV_ERR_TIMEOUT_RETRY */
return -EAGAIN;
return -EBUSY;
case 0x51: /* NV_ERR_NO_MEMORY */
return -ENOMEM;
default:
@ -601,7 +601,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
if (rpc->status) {
ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
if (PTR_ERR(ret) != -EAGAIN)
if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
@ -660,7 +660,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
if (rpc->status) {
ret = r535_rpc_status_to_errno(rpc->status);
if (ret != -EAGAIN)
if (ret != -EAGAIN && ret != -EBUSY)
nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}


@ -990,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
if (!size)
break;
offset = 0;
}
return panthor_vm_flush_range(vm, start_iova, iova - start_iova);


@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
if (state)
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
else /* Special case for asynchronous cursor updates. */
crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
/* Special case for asynchronous cursor updates. */
if (!crtc_state)
crtc_state = plane->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,


@ -1265,6 +1265,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
if (WARN_ON(!bo))
return -EINVAL;
return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}


@ -886,8 +886,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
if (WARN_ON(!xe_bo_is_vram(bo)))
return -EINVAL;
if (!xe_bo_is_vram(bo))
return 0;
ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
if (ret)
@ -937,6 +937,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
};
struct ttm_resource *new_mem;
struct ttm_place *place = &bo->placements[0];
int ret;
xe_bo_assert_held(bo);
@ -947,9 +948,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
if (WARN_ON(xe_bo_is_vram(bo)))
return -EINVAL;
if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
return -EINVAL;
if (!mem_type_is_vram(place->mem_type))
return 0;
ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
if (ret)
return ret;
@ -1719,6 +1726,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
int xe_bo_pin(struct xe_bo *bo)
{
struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
int err;
@ -1749,21 +1757,21 @@ int xe_bo_pin(struct xe_bo *bo)
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
spin_lock(&xe->pinned.lock);
list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
}
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
/*
@ -1809,23 +1817,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
void xe_bo_unpin(struct xe_bo *bo)
{
struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
spin_lock(&xe->pinned.lock);
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
}
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
}
ttm_bo_unpin(&bo->ttm);
}


@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
u8 id;
int ret;
if (!IS_DGFX(xe))
return 0;
/* User memory */
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
struct ttm_resource_manager *man =
ttm_manager_type(bdev, mem_type);
/*
* On igpu platforms with flat CCS we need to ensure we save and restore any CCS
* state since this state lives inside graphics stolen memory which doesn't survive
* hibernation.
*
* This can be further improved by only evicting objects that we know have actually
* used a compression enabled PAT index.
*/
if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
continue;
if (man) {
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
struct xe_bo *bo;
int ret;
if (!IS_DGFX(xe))
return 0;
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.evicted,
@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
* should setup the iosys map.
*/
xe_assert(xe, !iosys_map_is_null(&bo->vmap));
xe_assert(xe, xe_bo_is_vram(bo));
xe_bo_put(bo);


@ -203,14 +203,14 @@ retry:
write_locked = false;
}
if (err)
goto err_syncs;
goto err_hw_exec_mode;
if (write_locked) {
err = xe_vm_userptr_pin(vm);
downgrade_write(&vm->lock);
write_locked = false;
if (err)
goto err_hw_exec_mode;
goto err_unlock_list;
}
if (!args->num_batch_buffer) {


@ -1206,9 +1206,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
struct xe_oa_stream *stream = file->private_data;
struct xe_gt *gt = stream->gt;
xe_pm_runtime_get(gt_to_xe(gt));
mutex_lock(&gt->oa.gt_lock);
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
xe_pm_runtime_put(gt_to_xe(gt));
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);


@ -771,13 +771,24 @@
INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
/* MTL */
#define INTEL_ARL_IDS(MACRO__, ...) \
MACRO__(0x7D41, ## __VA_ARGS__), \
/* ARL */
#define INTEL_ARL_H_IDS(MACRO__, ...) \
MACRO__(0x7D51, ## __VA_ARGS__), \
MACRO__(0x7D67, ## __VA_ARGS__), \
MACRO__(0x7DD1, ## __VA_ARGS__)
#define INTEL_ARL_U_IDS(MACRO__, ...) \
MACRO__(0x7D41, ## __VA_ARGS__) \
#define INTEL_ARL_S_IDS(MACRO__, ...) \
MACRO__(0x7D67, ## __VA_ARGS__), \
MACRO__(0xB640, ## __VA_ARGS__)
#define INTEL_ARL_IDS(MACRO__, ...) \
INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
/* MTL */
#define INTEL_MTL_IDS(MACRO__, ...) \
INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
MACRO__(0x7D40, ## __VA_ARGS__), \