Merge tag 'amd-drm-fixes-6.5-2023-07-20' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.5-2023-07-20:

amdgpu:
- More PCIe DPM fixes for Intel platforms
- DCN3.0.1 fixes
- Virtual display timer fix
- Async flip fix
- SMU13 clock reporting fixes
- Add missing PSP firmware declaration
- DP MST fix
- DCN3.1.x fixes
- Slab out of bounds fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230720133456.7826-1-alexander.deucher@amd.com
commit 28801cc859
@@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
         alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
         }
-        xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
+        xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+                    0 : fpriv->xcp_id;
     } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
         domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
         alloc_flags = 0;
@@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
         pasid = 0;
     }
 
-    r = amdgpu_vm_init(adev, &fpriv->vm);
+    r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
     if (r)
         goto error_pasid;
 
-    r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+    r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
     if (r)
-        goto error_vm;
+        goto error_pasid;
 
     r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
     if (r)
@@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
         goto error_pasid;
     }
 
-    r = amdgpu_vm_init(adev, vm);
+    r = amdgpu_vm_init(adev, vm, -1);
     if (r) {
         DRM_ERROR("failed to initialize vm\n");
         goto error_pasid;
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
         DRM_WARN("%s: vblank timer overrun\n", __func__);
 
     ret = drm_crtc_handle_vblank(crtc);
+    /* Don't queue timer again when vblank is disabled. */
     if (!ret)
-        DRM_ERROR("amdgpu_vkms failure on handling vblank");
+        return HRTIMER_NORESTART;
 
     return HRTIMER_RESTART;
 }
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-    hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+    hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
 }
 
 static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
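Note on the two vkms hunks above: hrtimer_cancel() waits for a running callback to complete, which can hang when the cancel is issued from the vblank-handling path that the timer callback itself drives; hrtimer_try_to_cancel() just reports that the callback is active and returns, and the callback now stops re-arming itself by returning HRTIMER_NORESTART once vblank is disabled. A minimal userspace analogy (hypothetical types, not the kernel hrtimer API) of the "try" semantics:

/* Illustrative sketch only: a synchronous cancel must wait for a running
 * callback, so calling it from the callback's own path self-deadlocks; a
 * "try" cancel reports the callback is active (-1) and returns. */
#include <stdatomic.h>
#include <stdio.h>

enum { TIMER_IDLE, TIMER_QUEUED, TIMER_CALLBACK_RUNNING };

static atomic_int timer_state = TIMER_QUEUED;

/* 1 = cancelled, 0 = was idle, -1 = callback currently executing. */
static int timer_try_to_cancel(void)
{
    int expected = TIMER_QUEUED;

    if (atomic_compare_exchange_strong(&timer_state, &expected, TIMER_IDLE))
        return 1;
    return expected == TIMER_CALLBACK_RUNNING ? -1 : 0;
}

static void timer_callback(void)
{
    atomic_store(&timer_state, TIMER_CALLBACK_RUNNING);
    /* disable_vblank() can run here; a waiting cancel would hang. */
    printf("try_to_cancel from callback context: %d\n", timer_try_to_cancel());
    atomic_store(&timer_state, TIMER_IDLE);
}

int main(void)
{
    timer_callback();                                        /* prints -1 */
    printf("try_to_cancel while idle: %d\n", timer_try_to_cancel()); /* 0 */
    return 0;
}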
@@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @xcp_id: GPU partition selection id
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
 {
     struct amdgpu_bo *root_bo;
     struct amdgpu_bo_vm *root;
@@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
     vm->evicting = false;
 
     r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
-                false, &root);
+                false, &root, xcp_id);
     if (r)
         goto error_free_delayed;
     root_bo = &root->bo;
@@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
             u32 pasid);
 
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
             struct amdgpu_bo_vm *vmbo, bool immediate);
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-            int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+            int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+            int32_t xcp_id);
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
                 struct amdgpu_vm *vm);
@@ -498,11 +498,12 @@ exit:
  * @level: the page table level
  * @immediate: use a immediate update
  * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
  */
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-            int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+            int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+            int32_t xcp_id)
 {
-    struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
     struct amdgpu_bo_param bp;
     struct amdgpu_bo *bo;
     struct dma_resv *resv;
@@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
     bp.type = ttm_bo_type_kernel;
     bp.no_wait_gpu = immediate;
-    bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+    bp.xcp_id_plus1 = xcp_id + 1;
 
     if (vm->root.bo)
         bp.resv = vm->root.bo->tbo.base.resv;
@@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
     bp.type = ttm_bo_type_kernel;
     bp.resv = bo->tbo.base.resv;
     bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-    bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+    bp.xcp_id_plus1 = xcp_id + 1;
 
     r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
 
@@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
         return 0;
 
     amdgpu_vm_eviction_unlock(vm);
-    r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+    r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+                vm->root.bo->xcp_id);
     amdgpu_vm_eviction_lock(vm);
     if (r)
        return r;
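The slab-out-of-bounds fix above removes a container_of(vm, struct amdgpu_fpriv, vm) that was only valid when the amdgpu_vm really was embedded in an fpriv; for standalone VMs (e.g. the MES self test, which now passes -1 explicitly) the computed container pointer landed outside the allocation. A minimal userspace sketch of the bug pattern, with hypothetical stand-in types:

/* Deriving a container pointer from a struct that is NOT actually
 * embedded in that container points outside the allocation; the fix
 * passes xcp_id down explicitly instead. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct fpriv {
    int xcp_id;                  /* lives before the embedded vm */
    struct vm { int dummy; } vm;
};

int main(void)
{
    struct fpriv *fp = malloc(sizeof(*fp));
    struct vm *standalone = malloc(sizeof(*standalone));

    fp->xcp_id = 3;
    /* Valid: &fp->vm really is embedded in an fpriv. */
    printf("embedded: xcp_id=%d\n",
           container_of(&fp->vm, struct fpriv, vm)->xcp_id);
    /* Invalid: the address precedes the standalone allocation; reading
     * through it is the slab-out-of-bounds access the patch removes.
     * (Only printed here, never dereferenced.) */
    struct fpriv *bogus = container_of(standalone, struct fpriv, vm);
    printf("bogus container %p vs allocation %p\n",
           (void *)bogus, (void *)standalone);
    free(fp);
    free(standalone);
    return 0;
}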
@@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
     if (!adev->xcp_mgr)
         return 0;
 
-    fpriv->xcp_id = ~0;
+    fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
     for (i = 0; i < MAX_XCP; ++i) {
         if (!adev->xcp_mgr->xcp[i].ddev)
             break;
@@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
         }
     }
 
-    fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
+    fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
                 adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
     return 0;
 }
@@ -37,6 +37,8 @@
 #define AMDGPU_XCP_FL_NONE 0
 #define AMDGPU_XCP_FL_LOCKED (1 << 0)
 
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {
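Several hunks in this series replace bare ~0 sentinels with the new AMDGPU_XCP_NO_PARTITION define. The change is about readability, not behavior: (~0) is the int value -1, which converts to an all-ones value when compared against an unsigned field. A small standalone check:

/* Sanity check of the sentinel pattern used above. */
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_XCP_NO_PARTITION (~0)

int main(void)
{
    uint32_t xcp_id = AMDGPU_XCP_NO_PARTITION;

    printf("xcp_id = 0x%x\n", xcp_id);        /* 0xffffffff */
    printf("matches ~0: %d\n", xcp_id == ~0); /* 1: same comparison result */
    printf("matches define: %d\n", xcp_id == AMDGPU_XCP_NO_PARTITION); /* 1 */
    return 0;
}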
@@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
     enum AMDGPU_XCP_IP_BLOCK ip_blk;
     uint32_t inst_mask;
 
-    ring->xcp_id = ~0;
+    ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
     if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
         return;
 
@@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
     u32 sel_xcp_id;
     int i;
 
-    if (fpriv->xcp_id == ~0) {
+    if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
         u32 least_ref_cnt = ~0;
 
         fpriv->xcp_id = 0;
@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
|
||||
|
||||
/* For large FW files the time to complete can be very long */
|
||||
#define USBC_PD_POLLING_LIMIT_S 240
|
||||
|
@@ -424,12 +424,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
 
     spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-    if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-        DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
-                     amdgpu_crtc->pflip_status,
-                     AMDGPU_FLIP_SUBMITTED,
-                     amdgpu_crtc->crtc_id,
-                     amdgpu_crtc);
+    if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+        DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+                 amdgpu_crtc->pflip_status,
+                 AMDGPU_FLIP_SUBMITTED,
+                 amdgpu_crtc->crtc_id,
+                 amdgpu_crtc);
         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
         return;
     }
@@ -883,7 +883,7 @@ static int dm_set_powergating_state(void *handle,
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1282,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
     pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
     pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-    pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+    pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
     pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
     pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 
@@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
     if (amdgpu_in_reset(adev))
         goto skip;
 
+    if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+        offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+        dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+        spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+        offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+        spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+        goto skip;
+    }
+
     mutex_lock(&adev->dm.dc_lock);
     if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
         dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1374,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
                 DP_TEST_RESPONSE,
                 &test_response.raw,
                 sizeof(test_response));
-    }
-    else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+    } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
             dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
             dc_link_dp_allow_hpd_rx_irq(dc_link)) {
         /* offload_work->data is from handle_hpd_rx_irq->
|
||||
mutex_init(&adev->dm.dc_lock);
|
||||
mutex_init(&adev->dm.audio_lock);
|
||||
|
||||
if(amdgpu_dm_irq_init(adev)) {
|
||||
if (amdgpu_dm_irq_init(adev)) {
|
||||
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
|
||||
goto error;
|
||||
}
|
||||
@@ -1696,9 +1704,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
     if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
         adev->dm.dc->debug.disable_stutter = true;
 
-    if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+    if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
         adev->dm.dc->debug.disable_dsc = true;
-    }
 
     if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
         adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1949,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
     mutex_destroy(&adev->dm.audio_lock);
     mutex_destroy(&adev->dm.dc_lock);
     mutex_destroy(&adev->dm.dpia_aux_lock);
-
-    return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1957,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
     int r;
     const struct dmcu_firmware_header_v1_0 *hdr;
 
-    switch(adev->asic_type) {
+    switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
     case CHIP_TAHITI:
     case CHIP_PITCAIRN:
@@ -2709,7 +2714,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
         struct dc_scaling_info scaling_infos[MAX_SURFACES];
         struct dc_flip_addrs flip_addrs[MAX_SURFACES];
         struct dc_stream_update stream_update;
-    } * bundle;
+    } *bundle;
     int k, m;
 
     bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2744,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 
 cleanup:
     kfree(bundle);
-
-    return;
 }
 
 static int dm_resume(void *handle)
@@ -2954,8 +2957,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
     .set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
     .type = AMD_IP_BLOCK_TYPE_DCE,
     .major = 1,
     .minor = 0,
@@ -3000,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
     caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
     caps->aux_support = false;
 
-    if (caps->ext_caps->bits.oled == 1 /*||
-        caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-        caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+    if (caps->ext_caps->bits.oled == 1
+        /*
+         * ||
+         * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+         * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+         */)
         caps->aux_support = true;
 
     if (amdgpu_backlight == 0)
@@ -3236,86 +3241,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-    u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-    u8 dret;
-    bool new_irq_handled = false;
-    int dpcd_addr;
-    int dpcd_bytes_to_read;
-
-    const int max_process_count = 30;
-    int process_count = 0;
-
-    const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-    if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-        dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-        /* DPCD 0x200 - 0x201 for downstream IRQ */
-        dpcd_addr = DP_SINK_COUNT;
-    } else {
-        dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-        /* DPCD 0x2002 - 0x2005 for downstream IRQ */
-        dpcd_addr = DP_SINK_COUNT_ESI;
-    }
-
-    dret = drm_dp_dpcd_read(
-        &aconnector->dm_dp_aux.aux,
-        dpcd_addr,
-        esi,
-        dpcd_bytes_to_read);
-
-    while (dret == dpcd_bytes_to_read &&
-        process_count < max_process_count) {
-        u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
-        u8 retry;
-        dret = 0;
-
-        process_count++;
-
-        DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-        /* handle HPD short pulse irq */
-        if (aconnector->mst_mgr.mst_state)
-            drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
-                            esi,
-                            ack,
-                            &new_irq_handled);
-
-        if (new_irq_handled) {
-            /* ACK at DPCD to notify down stream */
-            for (retry = 0; retry < 3; retry++) {
-                ssize_t wret;
-
-                wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
-                              dpcd_addr + 1,
-                              ack[1]);
-                if (wret == 1)
-                    break;
-            }
-
-            if (retry == 3) {
-                DRM_ERROR("Failed to ack MST event.\n");
-                return;
-            }
-
-            drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
-            /* check if there is new irq to be handled */
-            dret = drm_dp_dpcd_read(
-                &aconnector->dm_dp_aux.aux,
-                dpcd_addr,
-                esi,
-                dpcd_bytes_to_read);
-
-            new_irq_handled = false;
-        } else {
-            break;
-        }
-    }
-
-    if (process_count == max_process_count)
-        DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
                      union hpd_irq_data hpd_irq_data)
 {
@@ -3377,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)
     if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
         if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
             hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-            dm_handle_mst_sideband_msg(aconnector);
+            bool skip = false;
+
+            /*
+             * DOWN_REP_MSG_RDY is also handled by polling method
+             * mgr->cbs->poll_hpd_irq()
+             */
+            spin_lock(&offload_wq->offload_lock);
+            skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+            if (!skip)
+                offload_wq->is_handling_mst_msg_rdy_event = true;
+
+            spin_unlock(&offload_wq->offload_lock);
+
+            if (!skip)
+                schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
             goto out;
         }
 
@@ -3468,7 +3409,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
         aconnector = to_amdgpu_dm_connector(connector);
         dc_link = aconnector->dc_link;
 
-        if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+        if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
            int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
            int_params.irq_source = dc_link->irq_source_hpd;
 
@@ -3477,7 +3418,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                     (void *) aconnector);
         }
 
-        if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+        if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 
             /* Also register for DP short pulse (hpd_rx). */
             int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3486,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
             amdgpu_dm_irq_register_interrupt(adev, &int_params,
                     handle_hpd_rx_irq,
                     (void *) aconnector);
+
+            if (adev->dm.hpd_rx_offload_wq)
+                adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
+                    aconnector;
         }
-
-        if (adev->dm.hpd_rx_offload_wq)
-            adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
-                aconnector;
     }
 }
@@ -3503,7 +3444,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
     struct dc_interrupt_params int_params = {0};
     int r;
     int i;
-    unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+    unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
     int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
     int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3458,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
      * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
      * coming from DC hardware.
      * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-     * for acknowledging and handling. */
+     * for acknowledging and handling.
+     */
 
     /* Use VBLANK interrupt */
     for (i = 0; i < adev->mode_info.num_crtc; i++) {
-        r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+        r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
         if (r) {
             DRM_ERROR("Failed to add crtc irq id!\n");
             return r;
|
||||
|
||||
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
|
||||
int_params.irq_source =
|
||||
dc_interrupt_to_irq_source(dc, i+1 , 0);
|
||||
dc_interrupt_to_irq_source(dc, i + 1, 0);
|
||||
|
||||
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
|
||||
|
||||
@@ -3585,7 +3527,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
     struct dc_interrupt_params int_params = {0};
     int r;
     int i;
-    unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+    unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
     if (adev->family >= AMDGPU_FAMILY_AI)
         client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3544,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
      * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
      * coming from DC hardware.
      * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-     * for acknowledging and handling. */
+     * for acknowledging and handling.
+     */
 
     /* Use VBLANK interrupt */
     for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
|
||||
}
|
||||
|
||||
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
|
||||
unsigned *min, unsigned *max)
|
||||
unsigned int *min, unsigned int *max)
|
||||
{
|
||||
if (!caps)
|
||||
return 0;
|
||||
@@ -4069,7 +4012,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
                     uint32_t brightness)
 {
-    unsigned min, max;
+    unsigned int min, max;
 
     if (!get_brightness_range(caps, &min, &max))
         return brightness;
@@ -4082,7 +4025,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
                       uint32_t brightness)
 {
-    unsigned min, max;
+    unsigned int min, max;
 
     if (!get_brightness_range(caps, &min, &max))
         return brightness;
@@ -4562,7 +4505,6 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
     drm_atomic_private_obj_fini(&dm->atomic_obj);
-    return;
 }
 
 /******************************************************************************
@@ -5394,6 +5336,7 @@ static bool adjust_colour_depth_from_display_info(
 {
     enum dc_color_depth depth = timing_out->display_color_depth;
     int normalized_clk;
+
     do {
         normalized_clk = timing_out->pix_clk_100hz / 10;
         /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5552,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
     struct dc_sink_init_data sink_init_data = { 0 };
     struct dc_sink *sink = NULL;
+
     sink_init_data.link = aconnector->dc_link;
     sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
 
@@ -5732,7 +5676,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
         return &aconnector->freesync_vid_base;
 
     /* Find the preferred mode */
-    list_for_each_entry (m, list_head, head) {
+    list_for_each_entry(m, list_head, head) {
         if (m->type & DRM_MODE_TYPE_PREFERRED) {
             m_pref = m;
             break;
@@ -5756,7 +5700,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
      * For some monitors, preferred mode is not the mode with highest
      * supported refresh rate.
      */
-    list_for_each_entry (m, list_head, head) {
+    list_for_each_entry(m, list_head, head) {
         current_refresh = drm_mode_vrefresh(m);
 
         if (m->hdisplay == m_pref->hdisplay &&
@@ -6028,7 +5972,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
          * This may not be an error, the use case is when we have no
          * usermode calls to reset and set mode upon hotplug. In this
          * case, we call set mode ourselves to restore the previous mode
-         * and the modelist may not be filled in in time.
+         * and the modelist may not be filled in time.
          */
         DRM_DEBUG_DRIVER("No preferred mode found\n");
     } else {
@@ -6051,9 +5995,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
         drm_mode_set_crtcinfo(&mode, 0);
 
         /*
-        * If scaling is enabled and refresh rate didn't change
-        * we copy the vic and polarities of the old timings
-        */
+         * If scaling is enabled and refresh rate didn't change
+         * we copy the vic and polarities of the old timings
+         */
         if (!scale || mode_refresh != preferred_refresh)
             fill_stream_properties_from_drm_display_mode(
                 stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6761,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 
     if (!state->duplicated) {
         int max_bpc = conn_state->max_requested_bpc;
+
         is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
               aconnector->force_yuv420_output;
         color_depth = convert_color_depth_from_display_info(connector,
|
||||
{
|
||||
struct drm_display_mode *m;
|
||||
|
||||
list_for_each_entry (m, &aconnector->base.probed_modes, head) {
|
||||
list_for_each_entry(m, &aconnector->base.probed_modes, head) {
|
||||
if (drm_mode_equal(m, mode))
|
||||
return true;
|
||||
}
|
||||
@@ -7295,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
     aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
     memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
     mutex_init(&aconnector->hpd_lock);
+    mutex_init(&aconnector->handle_mst_msg_ready);
 
     /*
      * configure support HPD hot plug connector_>polled default value is 0
@@ -7454,7 +7400,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
     link->priv = aconnector;
 
-    DRM_DEBUG_DRIVER("%s()\n", __func__);
 
     i2c = create_i2c(link->ddc, link->link_index, &res);
     if (!i2c) {
@@ -8125,7 +8070,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
          * Only allow immediate flips for fast updates that don't
          * change memory domain, FB pitch, DCC state, rotation or
          * mirroring.
+         *
+         * dm_crtc_helper_atomic_check() only accepts async flips with
+         * fast updates.
          */
+        if (crtc->state->async_flip &&
+            acrtc_state->update_type != UPDATE_TYPE_FAST)
+            drm_warn_once(state->dev,
+                      "[PLANE:%d:%s] async flip with non-fast update\n",
+                      plane->base.id, plane->name);
         bundle->flip_addrs[planes_count].flip_immediate =
             crtc->state->async_flip &&
             acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8121,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
              * DRI3/Present extension with defined target_msc.
              */
             last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-        }
-        else {
+        } else {
             /* For variable refresh rate mode only:
              * Get vblank of last completed flip to avoid > 1 vrr
              * flips per video frame by use of throttling, but allow
@@ -8502,8 +8454,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         dc_resource_state_copy_construct_current(dm->dc, dc_state);
     }
 
-    for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-                       new_crtc_state, i) {
+    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+                      new_crtc_state, i) {
         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
         drm_dbg_state(state->dev,
-            "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-            "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-            "connectors_changed:%d\n",
+            "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
             acrtc->crtc_id,
             new_crtc_state->enable,
             new_crtc_state->active,
@@ -9104,8 +9054,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
                 &commit->flip_done, 10*HZ);
 
         if (ret == 0)
-            DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-                  "timed out\n", crtc->base.id, crtc->name);
+            DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+                  crtc->base.id, crtc->name);
 
         drm_crtc_commit_put(commit);
     }
@@ -9190,7 +9140,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
     return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
     u64 num, den, res;
     struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
@@ -9312,9 +9263,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
         goto skip_modeset;
 
     drm_dbg_state(state->dev,
-        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-        "connectors_changed:%d\n",
+        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
         acrtc->crtc_id,
         new_crtc_state->enable,
         new_crtc_state->active,
@@ -9343,8 +9292,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                      old_crtc_state)) {
             new_crtc_state->mode_changed = false;
             DRM_DEBUG_DRIVER(
-                "Mode change not required for front porch change, "
-                "setting mode_changed to %d",
+                "Mode change not required for front porch change, setting mode_changed to %d",
                 new_crtc_state->mode_changed);
 
             set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9304,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
             struct drm_display_mode *high_mode;
 
             high_mode = get_highest_refresh_rate_mode(aconnector, false);
-            if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+            if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
                 set_freesync_fixed_config(dm_new_crtc_state);
-            }
         }
 
         ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9473,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
      */
     for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
         struct amdgpu_framebuffer *old_afb, *new_afb;
+
         if (other->type == DRM_PLANE_TYPE_CURSOR)
             continue;
 
@@ -9624,11 +9572,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
     }
 
     /* Core DRM takes care of checking FB modifiers, so we only need to
-     * check tiling flags when the FB doesn't have a modifier. */
+     * check tiling flags when the FB doesn't have a modifier.
+     */
     if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
         if (adev->family < AMDGPU_FAMILY_AI) {
             linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
-                 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+                 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
         } else {
             linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9850,12 +9799,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
     /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
      * cursor per pipe but it's going to inherit the scaling and
      * positioning from the underlying pipe. Check the cursor plane's
-     * blending properties match the underlying planes'. */
+     * blending properties match the underlying planes'.
+     */
 
     new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-    if (!new_cursor_state || !new_cursor_state->fb) {
+    if (!new_cursor_state || !new_cursor_state->fb)
         return 0;
-    }
 
     dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
     cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9849,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
     struct drm_connector_state *conn_state, *old_conn_state;
     struct amdgpu_dm_connector *aconnector = NULL;
     int i;
+
     for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
         if (!conn_state->crtc)
             conn_state = old_conn_state;
@@ -10334,7 +10284,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
     }
 
     /* Store the overall update type for use later in atomic check. */
-    for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+    for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
         struct dm_crtc_state *dm_new_crtc_state =
             to_dm_crtc_state(new_crtc_state);
 
@@ -10356,7 +10306,7 @@ fail:
     else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
         DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
     else
-        DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+        DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
 
     trace_amdgpu_dm_atomic_check_finish(state, ret);
 
@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
      * we're handling link loss
      */
     bool is_handling_link_loss;
+    /**
+     * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+     * ready event when we're already handling mst message ready event
+     */
+    bool is_handling_mst_msg_rdy_event;
     /**
      * @aconnector: The aconnector that this work queue is attached to
      */
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
     struct drm_dp_mst_port *mst_output_port;
     struct amdgpu_dm_connector *mst_root;
     struct drm_dp_aux *dsc_aux;
+    struct mutex handle_mst_msg_ready;
+
     /* TODO see if we can merge with ddc_bus or make a dm_connector */
     struct amdgpu_i2c_adapter *i2c;
 
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
         return -EINVAL;
     }
 
+    /*
+     * Only allow async flips for fast updates that don't change the FB
+     * pitch, the DCC state, rotation, etc.
+     */
+    if (crtc_state->async_flip &&
+        dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+        drm_dbg_atomic(crtc->dev,
+                   "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+                   crtc->base.id, crtc->name);
+        return -EINVAL;
+    }
+
     /* In some use cases, like reset, no stream is attached */
     if (!dm_crtc_state->stream)
         return 0;
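The async-flip fix rejects immediate flips at atomic-check time unless the update is classified "fast" (nothing but the scanout address changes), and the commit path above only warns if such a combination slips through. A minimal sketch of the validation rule with hypothetical stand-in types:

/* An async (immediate) flip is only coherent when the update does not
 * touch pitch, DCC state, rotation or memory domain. */
#include <stdbool.h>
#include <stdio.h>

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

struct crtc_state {
    bool async_flip;
    enum update_type update_type;
};

static int check_async_flip(const struct crtc_state *s)
{
    if (s->async_flip && s->update_type != UPDATE_TYPE_FAST)
        return -22; /* -EINVAL: reject at atomic check time */
    return 0;
}

int main(void)
{
    struct crtc_state ok = { .async_flip = true, .update_type = UPDATE_TYPE_FAST };
    struct crtc_state bad = { .async_flip = true, .update_type = UPDATE_TYPE_FULL };

    printf("fast async flip: %d\n", check_async_flip(&ok));  /* 0 */
    printf("full async flip: %d\n", check_async_flip(&bad)); /* -22 */
    return 0;
}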
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
     return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+    struct drm_dp_mst_topology_mgr *mgr,
+    enum mst_msg_ready_type msg_rdy_type)
+{
+    uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+    uint8_t dret;
+    bool new_irq_handled = false;
+    int dpcd_addr;
+    uint8_t dpcd_bytes_to_read;
+    const uint8_t max_process_count = 30;
+    uint8_t process_count = 0;
+    u8 retry;
+    struct amdgpu_dm_connector *aconnector =
+            container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+
+    const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+    if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+        dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+        /* DPCD 0x200 - 0x201 for downstream IRQ */
+        dpcd_addr = DP_SINK_COUNT;
+    } else {
+        dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+        /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+        dpcd_addr = DP_SINK_COUNT_ESI;
+    }
+
+    mutex_lock(&aconnector->handle_mst_msg_ready);
+
+    while (process_count < max_process_count) {
+        u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+        process_count++;
+
+        dret = drm_dp_dpcd_read(
+            &aconnector->dm_dp_aux.aux,
+            dpcd_addr,
+            esi,
+            dpcd_bytes_to_read);
+
+        if (dret != dpcd_bytes_to_read) {
+            DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+            break;
+        }
+
+        DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+        switch (msg_rdy_type) {
+        case DOWN_REP_MSG_RDY_EVENT:
+            /* Only handle DOWN_REP_MSG_RDY case*/
+            esi[1] &= DP_DOWN_REP_MSG_RDY;
+            break;
+        case UP_REQ_MSG_RDY_EVENT:
+            /* Only handle UP_REQ_MSG_RDY case*/
+            esi[1] &= DP_UP_REQ_MSG_RDY;
+            break;
+        default:
+            /* Handle both cases*/
+            esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+            break;
+        }
+
+        if (!esi[1])
+            break;
+
+        /* handle MST irq */
+        if (aconnector->mst_mgr.mst_state)
+            drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+                            esi,
+                            ack,
+                            &new_irq_handled);
+
+        if (new_irq_handled) {
+            /* ACK at DPCD to notify down stream */
+            for (retry = 0; retry < 3; retry++) {
+                ssize_t wret;
+
+                wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+                              dpcd_addr + 1,
+                              ack[1]);
+                if (wret == 1)
+                    break;
+            }
+
+            if (retry == 3) {
+                DRM_ERROR("Failed to ack MST event.\n");
+                return;
+            }
+
+            drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+            new_irq_handled = false;
+        } else {
+            break;
+        }
+    }
+
+    mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+    if (process_count == max_process_count)
+        DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+    dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
     .add_connector = dm_dp_add_mst_connector,
+    .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
 
+enum mst_msg_ready_type {
+    NONE_MSG_RDY_EVENT = 0,
+    DOWN_REP_MSG_RDY_EVENT = 1,
+    UP_REQ_MSG_RDY_EVENT = 2,
+    DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
||||
@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
|
||||
void
|
||||
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
|
||||
|
||||
void dm_handle_mst_sideband_msg_ready_event(
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
enum mst_msg_ready_type msg_rdy_type);
|
||||
|
||||
struct dsc_mst_fairness_vars {
|
||||
int pbn;
|
||||
bool dsc_enabled;
|
||||
|
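The key idea behind dm_handle_mst_sideband_msg_ready_event() is that the same DPCD ESI byte now feeds two consumers: the hpd_rx short-pulse IRQ path and the new poll_hpd_irq callback. Each caller masks the byte down to the event class it owns, so neither path steals the other's events. A standalone sketch of the filtering (the DPCD bit values are assumptions mirroring drm_dp.h, where DOWN_REP_MSG_RDY = 1 << 4 and UP_REQ_MSG_RDY = 1 << 5):

#include <stdint.h>
#include <stdio.h>

#define DP_DOWN_REP_MSG_RDY (1 << 4)
#define DP_UP_REQ_MSG_RDY   (1 << 5)

enum mst_msg_ready_type {
    NONE_MSG_RDY_EVENT = 0,
    DOWN_REP_MSG_RDY_EVENT = 1,
    UP_REQ_MSG_RDY_EVENT = 2,
    DOWN_OR_UP_MSG_RDY_EVENT = 3
};

static uint8_t filter_esi(uint8_t esi1, enum mst_msg_ready_type type)
{
    switch (type) {
    case DOWN_REP_MSG_RDY_EVENT:
        return esi1 & DP_DOWN_REP_MSG_RDY;   /* poll callback's slice */
    case UP_REQ_MSG_RDY_EVENT:
        return esi1 & DP_UP_REQ_MSG_RDY;
    default:
        return esi1 & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
    }
}

int main(void)
{
    uint8_t esi1 = DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY;

    printf("poll path sees 0x%02x\n", filter_esi(esi1, DOWN_REP_MSG_RDY_EVENT));
    printf("irq path sees  0x%02x\n", filter_esi(esi1, DOWN_OR_UP_MSG_RDY_EVENT));
    return 0;
}

The per-connector handle_mst_msg_ready mutex added above then serializes the two paths so a reply packet is never consumed twice.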
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
             stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
             stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
             tmds_present = true;
+
+        /* Checking stream / link detection ensuring that PHY is active*/
+        if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+            display_count++;
+
     }
 
     for (i = 0; i < dc->link_count; i++) {
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
         if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
             struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-            if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+            if (pipe_ctx->stream_res.tg &&
+                pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
                 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
             pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
             hubp->funcs->set_blank(hubp, true);
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
     optc1->opp_count = 1;
 }
 
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
         struct dc_crtc_timing *timing)
 {
     struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
         OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
 }
 
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
 {
     struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 
 void optc3_set_odm_bypass(struct timing_generator *optc,
         const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+        struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
 void optc3_tg_init(struct timing_generator *optc);
 void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
 #endif /* __DC_OPTC_DCN30_H__ */
@@ -11,7 +11,8 @@
 # Makefile for dcn30.
 
 DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-        dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+        dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+        dcn301_optc.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
 
new file: drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c (185 lines)
@@ -0,0 +1,185 @@
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "dcn301_optc.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"

#include "dml/dcn30/dcn30_fpu.h"
#include "dc_trace.h"

#define REG(reg)\
    optc1->tg_regs->reg

#define CTX \
    optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
    optc1->tg_shift->field_name, optc1->tg_mask->field_name


/**
 * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
 *
 * @optc: timing_generator instance.
 * @params: parameters used for Dynamic Refresh Rate.
 */
void optc301_set_drr(
    struct timing_generator *optc,
    const struct drr_params *params)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);

    if (params != NULL &&
        params->vertical_total_max > 0 &&
        params->vertical_total_min > 0) {

        if (params->vertical_total_mid != 0) {

            REG_SET(OTG_V_TOTAL_MID, 0,
                OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

            REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
                    OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
                    OTG_VTOTAL_MID_FRAME_NUM,
                    (uint8_t)params->vertical_total_mid_frame_num);

        }

        optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);

        REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
                OTG_V_TOTAL_MIN_SEL, 1,
                OTG_V_TOTAL_MAX_SEL, 1,
                OTG_FORCE_LOCK_ON_EVENT, 0,
                OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
                OTG_SET_V_TOTAL_MIN_MASK, 0);
        // Setup manual flow control for EOF via TRIG_A
        optc->funcs->setup_manual_trigger(optc);

    } else {
        REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
                OTG_SET_V_TOTAL_MIN_MASK, 0,
                OTG_V_TOTAL_MIN_SEL, 0,
                OTG_V_TOTAL_MAX_SEL, 0,
                OTG_FORCE_LOCK_ON_EVENT, 0);

        optc->funcs->set_vtotal_min_max(optc, 0, 0);
    }
}


void optc301_setup_manual_trigger(struct timing_generator *optc)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);

    REG_SET_8(OTG_TRIGA_CNTL, 0,
            OTG_TRIGA_SOURCE_SELECT, 21,
            OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
            OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
            OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
            OTG_TRIGA_POLARITY_SELECT, 0,
            OTG_TRIGA_FREQUENCY_SELECT, 0,
            OTG_TRIGA_DELAY, 0,
            OTG_TRIGA_CLEAR, 1);
}

static struct timing_generator_funcs dcn30_tg_funcs = {
        .validate_timing = optc1_validate_timing,
        .program_timing = optc1_program_timing,
        .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
        .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
        .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
        .program_global_sync = optc1_program_global_sync,
        .enable_crtc = optc2_enable_crtc,
        .disable_crtc = optc1_disable_crtc,
        /* used by enable_timing_synchronization. Not need for FPGA */
        .is_counter_moving = optc1_is_counter_moving,
        .get_position = optc1_get_position,
        .get_frame_count = optc1_get_vblank_counter,
        .get_scanoutpos = optc1_get_crtc_scanoutpos,
        .get_otg_active_size = optc1_get_otg_active_size,
        .set_early_control = optc1_set_early_control,
        /* used by enable_timing_synchronization. Not need for FPGA */
        .wait_for_state = optc1_wait_for_state,
        .set_blank_color = optc3_program_blank_color,
        .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
        .triplebuffer_lock = optc3_triplebuffer_lock,
        .triplebuffer_unlock = optc2_triplebuffer_unlock,
        .enable_reset_trigger = optc1_enable_reset_trigger,
        .enable_crtc_reset = optc1_enable_crtc_reset,
        .disable_reset_trigger = optc1_disable_reset_trigger,
        .lock = optc3_lock,
        .unlock = optc1_unlock,
        .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
        .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
        .enable_optc_clock = optc1_enable_optc_clock,
        .set_drr = optc301_set_drr,
        .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
        .set_vtotal_min_max = optc3_set_vtotal_min_max,
        .set_static_screen_control = optc1_set_static_screen_control,
        .program_stereo = optc1_program_stereo,
        .is_stereo_left_eye = optc1_is_stereo_left_eye,
        .tg_init = optc3_tg_init,
        .is_tg_enabled = optc1_is_tg_enabled,
        .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
        .clear_optc_underflow = optc1_clear_optc_underflow,
        .setup_global_swap_lock = NULL,
        .get_crc = optc1_get_crc,
        .configure_crc = optc2_configure_crc,
        .set_dsc_config = optc3_set_dsc_config,
        .get_dsc_status = optc2_get_dsc_status,
        .set_dwb_source = NULL,
        .set_odm_bypass = optc3_set_odm_bypass,
        .set_odm_combine = optc3_set_odm_combine,
        .get_optc_source = optc2_get_optc_source,
        .set_out_mux = optc3_set_out_mux,
        .set_drr_trigger_window = optc3_set_drr_trigger_window,
        .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
        .set_gsl = optc2_set_gsl,
        .set_gsl_source_select = optc2_set_gsl_source_select,
        .set_vtg_params = optc1_set_vtg_params,
        .program_manual_trigger = optc2_program_manual_trigger,
        .setup_manual_trigger = optc301_setup_manual_trigger,
        .get_hw_timing = optc1_get_hw_timing,
        .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};

void dcn301_timing_generator_init(struct optc *optc1)
{
    optc1->base.funcs = &dcn30_tg_funcs;

    optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
    optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

    optc1->min_h_blank = 32;
    optc1->min_v_blank = 3;
    optc1->min_v_blank_interlace = 5;
    optc1->min_h_sync_width = 4;
    optc1->min_v_sync_width = 1;
}
new file: drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h (36 lines)
@@ -0,0 +1,36 @@
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DC_OPTC_DCN301_H__
#define __DC_OPTC_DCN301_H__

#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_optc.h"

void dcn301_timing_generator_init(struct optc *optc1);
void optc301_setup_manual_trigger(struct timing_generator *optc);
void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);

#endif /* __DC_OPTC_DCN301_H__ */
@@ -42,7 +42,7 @@
 #include "dcn30/dcn30_hubp.h"
 #include "irq/dcn30/irq_service_dcn30.h"
 #include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
 #include "dcn20/dcn20_hwseq.h"
 #include "dcn30/dcn30_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
     tgn10->tg_shift = &optc_shift;
     tgn10->tg_mask = &optc_mask;
 
-    dcn30_timing_generator_init(tgn10);
+    dcn301_timing_generator_init(tgn10);
 
     return &tgn10->base;
 }
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
     .timing_trace = false,
     .clock_trace = true,
     .disable_pplib_clock_request = true,
-    .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+    .pipe_split_policy = MPC_SPLIT_AVOID,
     .force_single_disp_pipe_split = false,
     .disable_dcc = DCC_ENABLE,
     .vsr_support = true,
@@ -295,7 +295,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
         pipe = &res_ctx->pipe_ctx[i];
         timing = &pipe->stream->timing;
 
-        pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+        if (pipe->stream->adjust.v_total_min != 0)
+            pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+        else
+            pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
         pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
         pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
         pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
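The hunk above guards against adjust.v_total_min being 0 before any DRR adjustment has been applied, which would otherwise propagate vtotal = 0 into the DML pipe parameters. A standalone sketch of the fallback:

/* Pick the DRR-adjusted total when one exists, else the nominal total. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pick_vtotal(uint32_t v_total_min, uint32_t timing_v_total)
{
    if (v_total_min != 0)
        return v_total_min;  /* DRR-adjusted total */
    return timing_v_total;   /* nominal mode total */
}

int main(void)
{
    printf("with DRR: %u\n", pick_vtotal(1125, 1125)); /* 1125 */
    printf("no DRR:   %u\n", pick_vtotal(0, 1125));    /* 1125, not 0 */
    return 0;
}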
@@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
     return result;
 }
 
-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
-    struct cpuinfo_x86 *c = &cpu_data(0);
-
-    return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
-    return false;
-#endif
-}
-
 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
     struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
     data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
     data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
     data->pcie_dpm_key_disabled =
-        intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+        !amdgpu_device_pcie_dynamic_switching_supported() ||
+        !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
     /* need to set voltage control types before EVV patching */
     data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
     data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
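This is one of the "PCIe DPM fixes for Intel platforms": instead of special-casing a single Intel CPU model (Rocket Lake), PCIe DPM now stays enabled only when the platform reports dynamic link-switching support and the feature mask allows it. A sketch of the reworked predicate with stub inputs (the PP_PCIE_DPM_MASK bit value below is a stand-in, not the real mask):

#include <stdbool.h>
#include <stdio.h>

#define PP_PCIE_DPM_MASK (1u << 2) /* stand-in bit for illustration */

static bool pcie_dpm_key_disabled(bool dyn_switching_supported,
                                  unsigned int feature_mask)
{
    return !dyn_switching_supported || !(feature_mask & PP_PCIE_DPM_MASK);
}

int main(void)
{
    printf("supported+enabled: %d\n", pcie_dpm_key_disabled(true, PP_PCIE_DPM_MASK));  /* 0 */
    printf("unsupported:       %d\n", pcie_dpm_key_disabled(false, PP_PCIE_DPM_MASK)); /* 1 */
    printf("masked off:        %d\n", pcie_dpm_key_disabled(true, 0));                 /* 1 */
    return 0;
}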
@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
         *size = 4;
         break;
     case AMDGPU_PP_SENSOR_GFX_MCLK:
-        ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
+        ret = sienna_cichlid_get_smu_metrics_data(smu,
+                              METRICS_CURR_UCLK,
+                              (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
         break;
     case AMDGPU_PP_SENSOR_GFX_SCLK:
-        ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+        ret = sienna_cichlid_get_smu_metrics_data(smu,
+                              METRICS_AVERAGE_GFXCLK,
+                              (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
         break;
@@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
         break;
     case AMDGPU_PP_SENSOR_GFX_MCLK:
         ret = smu_v13_0_7_get_smu_metrics_data(smu,
-                           METRICS_AVERAGE_UCLK,
+                           METRICS_CURR_UCLK,
                            (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
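These two SMU13/sienna-cichlid hunks switch the memory-clock sensor from the averaged metric to the current one, since the average reads misleadingly low while UCLK idles in deep low-power states between samples. The "*= 100" that follows each read is a unit conversion; my understanding (an assumption, based on how the sysfs layer divides the value back down) is that the metrics report MHz while the powerplay sensor interface expects 10 kHz units:

#include <stdint.h>
#include <stdio.h>

static uint32_t sensor_from_mhz(uint32_t mhz)
{
    return mhz * 100; /* MHz -> 10 kHz sensor units */
}

int main(void)
{
    uint32_t uclk_mhz = 1000; /* current (not averaged) memory clock */

    printf("sensor value: %u (10 kHz units) = %u MHz\n",
           sensor_from_mhz(uclk_mhz), sensor_from_mhz(uclk_mhz) / 100);
    return 0;
}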