Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 12:11:40 +00:00)
drm fixes for 5.19-rc2
Merge tag 'drm-fixes-2022-06-10' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Not a huge amount here, mainly a bunch of scattered amdgpu fixes, and
  then some misc panfrost, bridge/panel ones, and one ast fix for
  multi-monitors. Probably pick up a bit more next week like rc3 often
  does.

  amdgpu:
   - DCN 3.1 golden settings fix
   - eDP fixes
   - DMCUB fixes
   - GFX11 fixes and cleanups
   - VCN fix for yellow carp
   - GMC11 fixes
   - RAS fixes
   - GPUVM TLB flush fixes
   - SMU13 fixes
   - VCN3 AV1 regression fix
   - VCN2 JPEG fix
   - Other misc fixes

  amdkfd:
   - MMU notifier fix
   - Support for more GC 10.3.x families
   - Pinned BO handling fix
   - Partial migration bug fix

  panfrost:
   - fix a use after free

  ti-sn65dsi83:
   - fix invalid DT configuration

  panel:
   - two self refresh fixes

  ast:
   - multiple output fix"

* tag 'drm-fixes-2022-06-10' of git://anongit.freedesktop.org/drm/drm: (37 commits)
  drm/ast: Support multiple outputs
  drm/amdgpu/mes: only invalid/prime icache when finish loading both pipe MES FWs.
  drm/amdgpu/jpeg2: Add jpeg vmid update under IB submit
  drm/amdgpu: always flush the TLB on gfx8
  drm/amdgpu: fix limiting AV1 to the first instance on VCN3
  drm/amdkfd:Fix fw version for 10.3.6
  drm/amdgpu: Add MODE register to wave debug info in gfx11
  Revert "drm/amd/display: Pass the new context into disable OTG WA"
  Revert "drm/amdgpu: Ensure the DMA engine is deactivated during set ups"
  drm/atomic: Force bridge self-refresh-exit on CRTC switch
  drm/bridge: analogix_dp: Support PSR-exit to disable transition
  drm/amdgpu: suppress the compile warning about 64 bit type
  drm/amd/pm: suppress compile warnings about possible unaligned accesses
  drm/amdkfd: Fix partial migration bugs
  drm/amdkfd: add pinned BOs to kfd_bo_list
  drm/amdgpu: Update PDEs flush TLB if PTB/PDB moved
  drm/amdgpu: enable tmz by default for GC 10.3.7
  drm/amdkfd: Add GC 10.3.6 and 10.3.7 KFD definitions
  drm/amdkfd: Use mmget_not_zero in MMU notifier
  drm/amdgpu: Resolve RAS GFX error count issue after cold boot on Arcturus
  ...
commit 8dd77d4479
@@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	/* delete kgd_mem from kfd_bo_list to avoid re-validating
-	 * this BO in BO's restoring after eviction.
-	 */
 	mutex_lock(&mem->process_info->lock);
 
 	ret = amdgpu_bo_reserve(bo, true);

@@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 	amdgpu_amdkfd_remove_eviction_fence(
 		bo, mem->process_info->eviction_fence);
-	list_del_init(&mem->validate_list.head);
 
 	if (size)
 		*size = amdgpu_bo_size(bo);

@@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	process_info->eviction_fence = new_fence;
 	*ef = dma_fence_get(&new_fence->base);
 
-	/* Attach new eviction fence to all BOs */
+	/* Attach new eviction fence to all BOs except pinned ones */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
-		validate_list.head)
+		validate_list.head) {
+		if (mem->bo->tbo.pin_count)
+			continue;
+
 		amdgpu_bo_fence(mem->bo,
 			&process_info->eviction_fence->base, true);
+	}
 
 	/* Attach eviction fence to PD / PT BOs */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
 			    vm_list_node) {

@@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	int r;
-	r = amdgpu_ras_block_late_init(adev, ras_block);
-	if (r)
-		return r;
 
 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
 		if (!amdgpu_persistent_edc_harvesting_supported(adev))
 			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
 
+		r = amdgpu_ras_block_late_init(adev, ras_block);
+		if (r)
+			return r;
+
 		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
 		if (r)
 			goto late_fini;
+	} else {
+		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
 	}
 
 	return 0;

@@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(9, 1, 0):
 	/* RENOIR looks like RAVEN */
 	case IP_VERSION(9, 3, 0):
+	/* GC 10.3.7 */
+	case IP_VERSION(10, 3, 7):
 		if (amdgpu_tmz == 0) {
 			adev->gmc.tmz_enabled = false;
 			dev_info(adev->dev,

@@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(10, 3, 1):
 	/* YELLOW_CARP*/
 	case IP_VERSION(10, 3, 3):
-	/* GC 10.3.7 */
-	case IP_VERSION(10, 3, 7):
 		/* Don't enable it by default yet.
 		 */
 		if (amdgpu_tmz < 1) {

@@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
 	if (amdgpu_ras_query_error_status(obj->adev, &info))
 		return -EINVAL;
 
+	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
+	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+	}
+
 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
 			"ue", info.ue_count,
 			"ce", info.ce_count);

@@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 	if (amdgpu_ras_query_error_status(obj->adev, &info))
 		return -EINVAL;
 
-	if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+	if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+	    obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
-			DRM_WARN("Failed to reset error counter and error status");
+			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
 	}
 
 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,

@@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 		}
 	}
 
-	if (!amdgpu_persistent_edc_harvesting_supported(adev))
-		amdgpu_ras_reset_error_status(adev, info->head.block);
-
 	return 0;
 }
 
@@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		if (res)
 			return res;
 
+		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+			if (amdgpu_ras_reset_error_status(adev, info.head.block))
+				dev_warn(adev->dev, "Failed to reset error counter and error status");
+		}
+
 		ce += info.ce_count;
 		ue += info.ue_count;
 	}

@@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
 			continue;
 
 		amdgpu_ras_query_error_status(adev, &info);
+
+		if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+		    adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+			if (amdgpu_ras_reset_error_status(adev, info.head.block))
+				dev_warn(adev->dev, "Failed to reset error counter and error status");
+		}
 	}
 }

@@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 	    !amdgpu_ras_asic_supported(adev))
 		return;
 
-	if (!(amdgpu_sriov_vf(adev) &&
-		(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+	/* If driver run on sriov guest side, only enable ras for aldebaran */
+	if (amdgpu_sriov_vf(adev) &&
+	    adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
 		return;
 
 	if (!adev->gmc.xgmi.connected_to_cpu) {

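The same Vega20/Arcturus guard is now open-coded at four call sites in this file. A small helper like the following (hypothetical, not part of the patch) would centralize the check; the IP_VERSION packing matches amdgpu's maj/min/rev macro:

#include <stdbool.h>
#include <stdint.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* Vega20 (MP0 11.0.2) and Arcturus (MP0 11.0.4) reset their RAS error
 * counters in hardware as a side effect of the query, so the driver
 * must not reset them again in software. */
static bool ras_needs_sw_counter_reset(uint32_t mp0_ver)
{
	return mp0_ver != IP_VERSION(11, 0, 2) &&
	       mp0_ver != IP_VERSION(11, 0, 4);
}
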
@@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 {
 	struct amdgpu_vm_update_params params;
 	struct amdgpu_vm_bo_base *entry;
+	bool flush_tlb_needed = false;
 	int r, idx;
 
 	if (list_empty(&vm->relocated))

@@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 		goto error;
 
 	list_for_each_entry(entry, &vm->relocated, vm_status) {
+		/* vm_flush_needed after updating moved PDEs */
+		flush_tlb_needed |= entry->moved;
+
 		r = amdgpu_vm_pde_update(&params, entry);
 		if (r)
 			goto error;

@@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	/* vm_flush_needed after updating PDEs */
-	atomic64_inc(&vm->tlb_seq);
+	if (flush_tlb_needed)
+		atomic64_inc(&vm->tlb_seq);
 
 	while (!list_empty(&vm->relocated)) {
 		entry = list_first_entry(&vm->relocated,

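The fix above replaces an unconditional tlb_seq bump with one gated on whether any relocated page-directory entry actually moved. A minimal userspace sketch of the sequence-number scheme (names simplified, not the driver's actual types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct vm {
	atomic_uint_fast64_t tlb_seq;	/* bumped when translations change */
	uint64_t flushed_seq;		/* last sequence flushed to the HW */
};

static void vm_note_pde_update(struct vm *vm, bool pde_moved)
{
	/* Bump only when a PDE really moved; an unconditional bump (the
	 * old behavior) forces a TLB flush on every update for no
	 * correctness gain. */
	if (pde_moved)
		atomic_fetch_add(&vm->tlb_seq, 1);
}

static bool vm_needs_tlb_flush(struct vm *vm)
{
	return vm->flushed_seq != atomic_load(&vm->tlb_seq);
}
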
@@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
 		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
 
+	/*
+	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
+	 */
+	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
+
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;

@@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
+	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,

@@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
 
 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
-		*(uint64_t *)fw_autoload_mask |= 1 << id;
+		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
 }
 
 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,

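The 1 << id to 1ULL << id change above is a classic width bug: the shift is performed in 32-bit int arithmetic even though the result is stored through a uint64_t pointer, so any firmware ID of 32 or higher invokes undefined behavior and typically sets the wrong bit. A standalone sketch with a hypothetical ID value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fw_autoload_mask = 0;
	int id = 40;	/* hypothetical firmware ID above bit 31 */

	/* Broken form: "1 << id" shifts a 32-bit int; for id >= 32 this
	 * is undefined behavior in C, and on x86 it usually wraps to
	 * bit (id % 32), silently flagging the wrong firmware. */

	/* Fixed form: widen the constant before shifting. */
	fw_autoload_mask |= 1ULL << id;

	printf("mask = 0x%016llx\n", (unsigned long long)fw_autoload_mask);
	return 0;
}
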
@@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
 	return 0;
 }
 
-void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
 
@@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
 		break;
 	default:
 		BUG();
+		break;
 	}
 }
 

@@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
+#ifdef CONFIG_X86_64
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
+	}
+#endif
 	/* In case the PCI BAR is larger than the actual amount of vram */
 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)

@@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] =
 	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
 };
 
-void program_imu_rlc_ram(struct amdgpu_device *adev,
+static void program_imu_rlc_ram(struct amdgpu_device *adev,
 				const struct imu_rlc_ram_golden *regs,
 				const u32 array_size)
 {

@@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 {
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT));
+
 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

@@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
 		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
 		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
 		8 + 16,
-	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+	.emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */
 	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
 	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,

@@ -41,6 +41,7 @@
 #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET		0x4084
 #define mmUVD_JRBC_STATUS_INTERNAL_OFFSET		0x4089
 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET		0x401f
+#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET		0x4149
 
 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR		0x18000
 

@@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
 
 /* This function is for backdoor MES firmware */
 static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
-				    enum admgpu_mes_pipe pipe)
+				    enum admgpu_mes_pipe pipe, bool prime_icache)
 {
 	int r;
 	uint32_t data;

@@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
 	/* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
 	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
 
-	/* invalidate ICACHE */
-	data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
-	WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+	if (prime_icache) {
+		/* invalidate ICACHE */
+		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
+		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
 
-	/* prime the ICACHE. */
-	data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-	data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
-	WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+		/* prime the ICACHE. */
+		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
+		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+	}
 
 	soc21_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);

@@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE);
+
+		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
 		if (r) {
 			DRM_ERROR("failed to load MES fw, r=%d\n", r);
 			return r;
 		}
 
+		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
+		if (r) {
+			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
+			return r;
+		}
-
-		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE);
-		if (r) {
-			DRM_ERROR("failed to load MES fw, r=%d\n", r);
-			return r;
-		}
 	}
 
 	mes_v11_0_enable(adev, true);

@@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle)
 	if (!adev->enable_mes_kiq) {
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 			r = mes_v11_0_load_microcode(adev,
-						     AMDGPU_MES_SCHED_PIPE);
+						     AMDGPU_MES_SCHED_PIPE, true);
 			if (r) {
 				DRM_ERROR("failed to MES fw, r=%d\n", r);
 				return r;

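The refactor above threads a prime_icache flag through mes_v11_0_load_microcode() so the instruction cache is invalidated and primed only once, after firmware for both pipes is resident. A stubbed skeleton of the ordering (not driver code, names abbreviated):

#include <stdbool.h>

enum mes_pipe { MES_SCHED_PIPE, MES_KIQ_PIPE };

static int load_microcode(enum mes_pipe pipe, bool prime_icache)
{
	/* ... copy this pipe's ucode into its backing memory ... */
	if (prime_icache) {
		/* invalidate, then prime the ICACHE -- only valid once
		 * every pipe's firmware image is already in place */
	}
	return 0;
}

static int kiq_hw_init(void)
{
	int r;

	r = load_microcode(MES_SCHED_PIPE, false);	/* defer priming */
	if (r)
		return r;
	return load_microcode(MES_KIQ_PIPE, true);	/* prime at the end */
}
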
@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs yc_video_codecs_decode = {

@@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	}
 }
 
+
 /**
  * sdma_v5_2_gfx_stop - stop the gfx async dma engines
  *

@@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
- * context switch for an instance
+ * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
- * @instance_idx: the index of the SDMA instance
+ * @enable: enable/disable the DMA MEs context switch.
  *
- * Unhalt the async dma engines context switch.
+ * Halt or unhalt the async dma engines context switch.
 */
-static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
+static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl, phase_quantum = 0;
-
-	if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
-		return;
-	}
+	int i;
 
 	if (amdgpu_sdma_phase_quantum) {
 		unsigned value = amdgpu_sdma_phase_quantum;

@@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev,
 		phase_quantum =
 			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
 			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
-
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
-			phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
-			phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
-			phase_quantum);
-	}
-
-	if (!amdgpu_sriov_vf(adev)) {
-		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 1);
-		WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
-	}
-}
-
-/**
- * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
- *
- * @adev: amdgpu_device pointer
- *
- * Halt the async dma engines context switch.
- */
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
-{
-	u32 f32_cntl;
-	int i;
-
-	if (amdgpu_sriov_vf(adev))
-		return;
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 0);
-		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		if (enable && amdgpu_sdma_phase_quantum) {
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+			       phase_quantum);
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+			       phase_quantum);
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+			       phase_quantum);
+		}
+
+		if (!amdgpu_sriov_vf(adev)) {
+			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		}
 	}
+
 }
 
 /**
- * sdma_v5_2_halt - stop the async dma engines
+ * sdma_v5_2_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
 *
- * Halt the async dma engines.
+ * Halt or unhalt the async dma engines.
 */
-static void sdma_v5_2_halt(struct amdgpu_device *adev)
+static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
 	u32 f32_cntl;
+	int i;
 
-	sdma_v5_2_gfx_stop(adev);
-	sdma_v5_2_rlc_stop(adev);
+	if (!enable) {
+		sdma_v5_2_gfx_stop(adev);
+		sdma_v5_2_rlc_stop(adev);
+	}
 
 	if (!amdgpu_sriov_vf(adev)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
 		}
 	}

@@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev)
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
- * It assumes that the dma engine is stopped for each instance.
- * The function enables the engine and preemptions sequentially for each instance.
 *
 * Returns 0 for success, error for failure.
 */
 static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)

@@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 
 		ring->sched.ready = true;
 
-		sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
+		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
+			sdma_v5_2_ctx_switch_enable(adev, true);
+			sdma_v5_2_enable(adev, true);
+		}
 
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {

@@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
 	int i, j;
 
 	/* halt the MEs */
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_enable(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (!adev->sdma.instance[i].fw)

@@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (amdgpu_sriov_vf(adev)) {
-		sdma_v5_2_ctx_switch_disable_all(adev);
-		sdma_v5_2_halt(adev);
+		sdma_v5_2_ctx_switch_enable(adev, false);
+		sdma_v5_2_enable(adev, false);
 
 		/* set RB registers */
 		r = sdma_v5_2_gfx_resume(adev);

@@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 		amdgpu_gfx_off_ctrl(adev, false);
 
 	sdma_v5_2_soft_reset(adev);
+	/* unhalt the MEs */
+	sdma_v5_2_enable(adev, true);
+	/* enable sdma ring preemption */
+	sdma_v5_2_ctx_switch_enable(adev, true);
 
-	/* Soft reset supposes to disable the dma engine and preemption.
-	 * Now start the gfx rings and rlc compute queues.
-	 */
+	/* start the gfx rings and rlc compute queues */
 	r = sdma_v5_2_gfx_resume(adev);
 	if (adev->in_s0ix)
 		amdgpu_gfx_off_ctrl(adev, true);

@@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	sdma_v5_2_ctx_switch_disable_all(adev);
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_ctx_switch_enable(adev, false);
+	sdma_v5_2_enable(adev, false);
 
 	return 0;
 }

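This revert folds the per-instance halt/unhalt helpers back into enable-style functions parameterized by a bool. The register idiom throughout is a read-modify-write of a one-bit field; a self-contained miniature of it (the mask value here is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SDMA_F32_CNTL_HALT_MASK (1u << 0)	/* hypothetical bit layout */

static uint32_t set_field(uint32_t reg, uint32_t mask, int val)
{
	return (reg & ~mask) | (val ? mask : 0);
}

int main(void)
{
	uint32_t f32_cntl = 0x13;	/* pretend RREG32(...) */
	int enable = 1;

	/* sdma_v5_2_enable(): HALT = enable ? 0 : 1 */
	f32_cntl = set_field(f32_cntl, SDMA_F32_CNTL_HALT_MASK, enable ? 0 : 1);
	printf("f32_cntl = 0x%x\n", f32_cntl);	/* pretend WREG32(...) */
	return 0;
}
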
@@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
-				struct amdgpu_job *job)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
 {
 	struct drm_gpu_scheduler **scheds;
 
 	/* The create msg must be in the first IB submitted */
-	if (atomic_read(&job->base.entity->fence_seq))
+	if (atomic_read(&p->entity->fence_seq))
 		return -EINVAL;
 
 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
 		[AMDGPU_RING_PRIO_DEFAULT].sched;
-	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+	drm_sched_entity_modify_sched(p->entity, scheds, 1);
 	return 0;
 }
 
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
-			    uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo_va_mapping *map;

@@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
 			continue;
 
-		r = vcn_v3_0_limit_sched(p, job);
+		r = vcn_v3_0_limit_sched(p);
 		if (r)
 			goto out;
 	}

@@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 					   struct amdgpu_job *job,
 					   struct amdgpu_ib *ib)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	uint32_t msg_lo = 0, msg_hi = 0;
 	unsigned i;
 	int r;

@@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 			msg_hi = val;
 		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
 			   val == 0) {
-			r = vcn_v3_0_dec_msg(p, job,
-					     ((u64)msg_hi) << 32 | msg_lo);
+			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
 			if (r)
 				return r;
 		}

@@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
 		break;
 	case IP_VERSION(10, 3, 3):
+	case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
+	case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
 		pcache_info = yellow_carp_cache_info;
 		num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
 		break;

@@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
 	case IP_VERSION(4, 1, 2):/* RENOIR */
 	case IP_VERSION(5, 2, 1):/* VANGOGH */
 	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
+	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
 	case IP_VERSION(6, 0, 1):
 		kfd->device_info.num_sdma_queues_per_engine = 2;
 		break;

@@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
 	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
 	case IP_VERSION(10, 3, 1): /* VANGOGH */
 	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
+	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
 	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
 	case IP_VERSION(10, 1, 4):
 	case IP_VERSION(10, 1, 10): /* NAVI10 */

@@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 
 	if (gc_version < IP_VERSION(11, 0, 0)) {
 		/* Navi2x+, Navi1x+ */
-		if (gc_version >= IP_VERSION(10, 3, 0))
+		if (gc_version == IP_VERSION(10, 3, 6))
+			kfd->device_info.no_atomic_fw_version = 14;
+		else if (gc_version >= IP_VERSION(10, 3, 0))
 			kfd->device_info.no_atomic_fw_version = 92;
 		else if (gc_version >= IP_VERSION(10, 1, 1))
 			kfd->device_info.no_atomic_fw_version = 145;

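Note the placement of the new check: an if/else ladder matches top-down, so the exact match for GC 10.3.6 must sit before the >= 10.3.0 catch-all or it would never be reached. Condensed form of the ladder, with values taken from the hunk above:

#include <stdint.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static int no_atomic_fw_version(uint32_t gc_version)
{
	if (gc_version == IP_VERSION(10, 3, 6))		/* specific case first */
		return 14;
	else if (gc_version >= IP_VERSION(10, 3, 0))	/* then the range */
		return 92;
	else if (gc_version >= IP_VERSION(10, 1, 1))
		return 145;
	return 0;	/* hypothetical default for older parts */
}
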
@@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		if (!vf)
 			f2g = &gfx_v10_3_kfd2kgd;
 		break;
+	case IP_VERSION(10, 3, 6):
+		gfx_target_version = 100306;
+		if (!vf)
+			f2g = &gfx_v10_3_kfd2kgd;
+		break;
+	case IP_VERSION(10, 3, 7):
+		gfx_target_version = 100307;
+		if (!vf)
+			f2g = &gfx_v10_3_kfd2kgd;
+		break;
 	case IP_VERSION(11, 0, 0):
 		gfx_target_version = 110000;
 		f2g = &gfx_v11_kfd2kgd;

@@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
 			 dma_addr_t *scratch)
 {
-	uint64_t npages = migrate->cpages;
+	uint64_t npages = migrate->npages;
 	struct device *dev = adev->dev;
 	struct amdgpu_res_cursor cursor;
 	dma_addr_t *src;

@@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 						mfence);
 				if (r)
 					goto out_free_vram_pages;
-				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
 				j = 0;
 			} else {
 				amdgpu_res_next(&cursor, PAGE_SIZE);

@@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			continue;
 		}
 		src[i] = svm_migrate_addr(adev, spage);
-		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
 			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
 							 src + i - j, j,
 							 FROM_VRAM_TO_RAM,

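The i > 0 to j > 0 change matters because j, not i, tracks how many contiguous pages are pending in the current batch; src[i - 1] is only a meaningful comparison point while a batch is open. A compact userspace model of the batching loop (copy_gart stands in for the GART copy; not the driver's code):

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

static void copy_gart(const uint64_t *src, size_t n) { (void)src; (void)n; }

static void copy_batched(const uint64_t *src, size_t npages)
{
	size_t i, j = 0;	/* j = pages pending in the current batch */

	for (i = 0; i < npages; i++) {
		if (!src[i]) {			/* absent page: close batch */
			if (j)
				copy_gart(src + i - j, j);
			j = 0;
			continue;
		}
		/* Contiguity break: flush what is pending first. Guarding
		 * with i > 0 (the bug) would compare against src[i - 1]
		 * even when nothing is pending, e.g. right after an absent
		 * page, and issue a zero-length copy. */
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			copy_gart(src + i - j, j);
			j = 0;
		}
		j++;
	}
	if (j)
		copy_gart(src + npages - j, j);
}
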
@@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
 					   last_start, prange->start + i,
 					   pte_flags,
-					   last_start - prange->start,
+					   (last_start - prange->start) << PAGE_SHIFT,
 					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
 					   NULL, dma_addr, &vm->last_update);
 

@@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 
 	if (range->event == MMU_NOTIFY_RELEASE)
 		return true;
+	if (!mmget_not_zero(mni->mm))
+		return true;
 
 	start = mni->interval_tree.start;
 	last = mni->interval_tree.last;

@@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 	}
 
 	svm_range_unlock(prange);
+	mmput(mni->mm);
 
 	return true;
 }

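mmget_not_zero() succeeds only while the address space still has users, which is exactly what the notifier needs: if the process is already exiting there is nothing left to invalidate. A userspace analogue of the take-a-reference-only-if-alive idiom:

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int refs; };

/* Succeeds only while the object is still live (refs > 0), mirroring
 * mmget_not_zero(); a plain increment could resurrect a dying object. */
static bool get_not_zero(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r != 0) {
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
		/* r was reloaded by the failed CAS; retry */
	}
	return false;
}

static void put(struct obj *o)
{
	atomic_fetch_sub(&o->refs, 1);	/* real code frees at zero */
}
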
@@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 
 void dcn31_init_clocks(struct clk_mgr *clk_mgr)
 {
+	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
 	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
 	// Assumption is that boot state always supports pstate
+	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk
 	clk_mgr->clks.p_state_change_support = true;
 	clk_mgr->clks.prev_p_state_change_support = true;
 	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;

@@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
 	}
 }
 
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+	return clk_mgr_base->clks.ref_dtbclk_khz;
+}
+
 static struct clk_mgr_funcs dcn31_funcs = {
 	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
 	.update_clocks = dcn31_update_clocks,
 	.init_clocks = dcn31_init_clocks,
 	.enable_pme_wa = dcn31_enable_pme_wa,

@@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct(
 	}
 
 	clk_mgr->base.base.dprefclk_khz = 600000;
-	clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
+	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
 	dce_clock_read_ss_info(&clk_mgr->base);
 	/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
 	//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);

@@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx,
 		struct pp_smu_funcs *pp_smu,
 		struct dccg *dccg);
 
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base);
+
 void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
 
 #endif //__DCN31_CLK_MGR_H__

@@ -41,9 +41,7 @@
 
 #include "dc_dmub_srv.h"
 
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
 #include "dc_link_dp.h"
-#endif
 
 #define TO_CLK_MGR_DCN315(clk_mgr) \
 	container_of(clk_mgr, struct clk_mgr_dcn315, base)

@@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 
 static struct clk_mgr_funcs dcn315_funcs = {
 	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
 	.update_clocks = dcn315_update_clocks,
 	.init_clocks = dcn31_init_clocks,
 	.enable_pme_wa = dcn315_enable_pme_wa,

@@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct(
 
 	clk_mgr->base.base.dprefclk_khz = 600000;
 	clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
-	clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+	clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
 	dce_clock_read_ss_info(&clk_mgr->base);
-	clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+	clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
 
 	clk_mgr->base.base.bw_params = &dcn315_bw_params;
 

@@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
 static struct clk_mgr_funcs dcn316_funcs = {
 	.enable_pme_wa = dcn316_enable_pme_wa,
 	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
 	.update_clocks = dcn316_update_clocks,
 	.init_clocks = dcn31_init_clocks,
 	.are_clock_states_equal = dcn31_are_clock_states_equal,

@@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct(
 
 	clk_mgr->base.base.dprefclk_khz = 600000;
 	clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
-	clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+	clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
 	dce_clock_read_ss_info(&clk_mgr->base);
 	/*clk_mgr->base.dccg->ref_dtbclk_khz =
 	dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/

@@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = {
 
 static bool decide_fallback_link_setting(
 		struct dc_link *link,
-		struct dc_link_settings initial_link_settings,
-		struct dc_link_settings *current_link_setting,
+		struct dc_link_settings *max,
+		struct dc_link_settings *cur,
 		enum link_training_result training_result);
 static void maximize_lane_settings(const struct link_training_settings *lt_settings,
 		struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);

@@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries(
 	enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
 	enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
 	struct dc_link_settings cur_link_settings = *link_setting;
+	struct dc_link_settings max_link_settings = *link_setting;
 	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
 	int fail_count = 0;
 	bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */

@@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries(
 
 	dp_trace_commit_lt_init(link);
 
-
 	if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
 		/* We need to do this before the link training to ensure the idle
 		 * pattern in SST mode will be sent right after the link training

@@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries(
 			uint32_t req_bw;
 			uint32_t link_bw;
 
-			decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
-			/* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
-			 * minimum link bandwidth.
+			decide_fallback_link_setting(link, &max_link_settings,
+					&cur_link_settings, status);
+			/* Fail link training if reduced link bandwidth no longer meets
+			 * stream requirements.
 			 */
 			req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
 			link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
-			is_link_bw_low = (req_bw > link_bw);
-			is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
-				(cur_link_settings.lane_count <= LANE_COUNT_ONE));
-
-			if (is_link_bw_low)
-				DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
-					       __func__, req_bw, link_bw);
+			if (req_bw > link_bw)
+				break;
 		}
 
 		msleep(delay_between_attempts);

@@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap(
 	int *fail_count)
 {
 	struct dc_link_settings cur_link_settings = {0};
-	struct dc_link_settings initial_link_settings = *known_limit_link_setting;
+	struct dc_link_settings max_link_settings = *known_limit_link_setting;
 	bool success = false;
 	bool skip_video_pattern;
 	enum clock_source_id dp_cs_id = get_clock_source_id(link);

@@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap(
 	struct link_resource link_res;
 
 	memset(&irq_data, 0, sizeof(irq_data));
-	cur_link_settings = initial_link_settings;
+	cur_link_settings = max_link_settings;
 
 	/* Grant extended timeout request */
 	if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {

@@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap(
 		dp_trace_lt_result_update(link, status, true);
 		dp_disable_link_phy(link, &link_res, link->connector_signal);
 	} while (!success && decide_fallback_link_setting(link,
-			initial_link_settings, &cur_link_settings, status));
+			&max_link_settings, &cur_link_settings, status));
 
 	link->verified_link_cap = success ?
 			cur_link_settings : fail_safe_link_settings;

@@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy(
 */
 static bool decide_fallback_link_setting(
 		struct dc_link *link,
-		struct dc_link_settings initial_link_settings,
-		struct dc_link_settings *current_link_setting,
+		struct dc_link_settings *max,
+		struct dc_link_settings *cur,
 		enum link_training_result training_result)
 {
-	if (!current_link_setting)
+	if (!cur)
 		return false;
-	if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
+	if (!max)
+		return false;
+
+	if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
 	    link->dc->debug.force_dp2_lt_fallback_method)
-		return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings,
-				current_link_setting, training_result);
+		return decide_fallback_link_setting_max_bw_policy(link, max, cur,
+				training_result);
 
 	switch (training_result) {
 	case LINK_TRAINING_CR_FAIL_LANE0:

@@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting(
 	case LINK_TRAINING_CR_FAIL_LANE23:
 	case LINK_TRAINING_LQA_FAIL:
 	{
-		if (!reached_minimum_link_rate
-				(current_link_setting->link_rate)) {
-			current_link_setting->link_rate =
-				reduce_link_rate(
-					current_link_setting->link_rate);
-		} else if (!reached_minimum_lane_count
-				(current_link_setting->lane_count)) {
-			current_link_setting->link_rate =
-				initial_link_settings.link_rate;
+		if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+		} else if (!reached_minimum_lane_count(cur->lane_count)) {
+			cur->link_rate = max->link_rate;
 			if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
 				return false;
 			else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
-				current_link_setting->lane_count =
-						LANE_COUNT_ONE;
-			else if (training_result ==
-					LINK_TRAINING_CR_FAIL_LANE23)
-				current_link_setting->lane_count =
-						LANE_COUNT_TWO;
+				cur->lane_count = LANE_COUNT_ONE;
+			else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+				cur->lane_count = LANE_COUNT_TWO;
 			else
-				current_link_setting->lane_count =
-					reduce_lane_count(
-						current_link_setting->lane_count);
+				cur->lane_count = reduce_lane_count(cur->lane_count);
 		} else {
 			return false;
 		}

@@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting(
 	}
 	case LINK_TRAINING_EQ_FAIL_EQ:
 	{
-		if (!reached_minimum_lane_count
-				(current_link_setting->lane_count)) {
-			current_link_setting->lane_count =
-				reduce_lane_count(
-					current_link_setting->lane_count);
-		} else if (!reached_minimum_link_rate
-				(current_link_setting->link_rate)) {
-			current_link_setting->link_rate =
-				reduce_link_rate(
-					current_link_setting->link_rate);
-			current_link_setting->lane_count = initial_link_settings.lane_count;
+		if (!reached_minimum_lane_count(cur->lane_count)) {
+			cur->lane_count = reduce_lane_count(cur->lane_count);
+		} else if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+			/* Reduce max link rate to avoid potential infinite loop.
+			 * Needed so that any subsequent CR_FAIL fallback can't
+			 * re-set the link rate higher than the link rate from
+			 * the latest EQ_FAIL fallback.
+			 */
+			max->link_rate = cur->link_rate;
+			cur->lane_count = max->lane_count;
 		} else {
 			return false;
 		}

@@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting(
 	}
 	case LINK_TRAINING_EQ_FAIL_CR:
 	{
-		if (!reached_minimum_link_rate
-				(current_link_setting->link_rate)) {
-			current_link_setting->link_rate =
-				reduce_link_rate(
-					current_link_setting->link_rate);
-			current_link_setting->lane_count = initial_link_settings.lane_count;
+		if (!reached_minimum_link_rate(cur->link_rate)) {
+			cur->link_rate = reduce_link_rate(cur->link_rate);
+			/* Reduce max link rate to avoid potential infinite loop.
+			 * Needed so that any subsequent CR_FAIL fallback can't
+			 * re-set the link rate higher than the link rate from
+			 * the latest EQ_FAIL fallback.
+			 */
+			max->link_rate = cur->link_rate;
+			cur->lane_count = max->lane_count;
 		} else {
 			return false;
 		}

@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.186"
+#define DC_VER "3.2.187"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6

@@ -416,6 +416,7 @@ struct dc_clocks {
 	bool p_state_change_support;
 	enum dcn_zstate_support_state zstate_support;
 	bool dtbclk_en;
+	int ref_dtbclk_khz;
 	enum dcn_pwr_state pwr_state;
 	/*
 	 * Elements below are not compared for the purposes of

@@ -719,6 +720,8 @@ struct dc_debug_options {
 	bool apply_vendor_specific_lttpr_wa;
 	bool extended_blank_optimization;
 	union aux_wake_wa_options aux_wake_wa;
+	/* uses value at boot and disables switch */
+	bool disable_dtb_ref_clk_switch;
 	uint8_t psr_power_use_phy_fsm;
 	enum dml_hostvm_override_opts dml_hostvm_override;
 };

@@ -513,12 +513,10 @@ void dccg31_set_physymclk(
 /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
 static void dccg31_set_dtbclk_dto(
 		struct dccg *dccg,
-		int dtbclk_inst,
-		int req_dtbclk_khz,
-		int num_odm_segments,
-		const struct dc_crtc_timing *timing)
+		struct dtbclk_dto_params *params)
 {
 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+	int req_dtbclk_khz = params->pixclk_khz;
 	uint32_t dtbdto_div;
 
 	/* Mode            DTBDTO Rate       DTBCLK_DTO<x>_DIV Register

@@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto(
 	 * DSC native 4:2:2   pixel rate/2      4
 	 * Other modes        pixel rate        8
 	 */
-	if (num_odm_segments == 4) {
+	if (params->num_odm_segments == 4) {
 		dtbdto_div = 2;
-		req_dtbclk_khz = req_dtbclk_khz / 4;
-	} else if ((num_odm_segments == 2) ||
-			(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
-			(timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
-				&& !timing->dsc_cfg.ycbcr422_simple)) {
+		req_dtbclk_khz = params->pixclk_khz / 4;
+	} else if ((params->num_odm_segments == 2) ||
+			(params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+			(params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
+				&& !params->timing->dsc_cfg.ycbcr422_simple)) {
 		dtbdto_div = 4;
-		req_dtbclk_khz = req_dtbclk_khz / 2;
+		req_dtbclk_khz = params->pixclk_khz / 2;
 	} else
 		dtbdto_div = 8;
 
-	if (dccg->ref_dtbclk_khz && req_dtbclk_khz) {
+	if (params->ref_dtbclk_khz && req_dtbclk_khz) {
 		uint32_t modulo, phase;
 
 		// phase / modulo = dtbclk / dtbclk ref
-		modulo = dccg->ref_dtbclk_khz * 1000;
-		phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1),
-			dccg->ref_dtbclk_khz);
+		modulo = params->ref_dtbclk_khz * 1000;
+		phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1),
+			params->ref_dtbclk_khz);
 
-		REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-				DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+				DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
 
-		REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo);
-		REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase);
+		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
 
-		REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-				DTBCLK_DTO_ENABLE[dtbclk_inst], 1);
+		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+				DTBCLK_DTO_ENABLE[params->otg_inst], 1);
 
-		REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-				DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1,
+		REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+				DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
 				1, 100);
 
 		/* The recommended programming sequence to enable DTBCLK DTO to generate
 		 * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should
 		 * be set only after DTO is enabled
 		 */
-		REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-				PIPE_DTO_SRC_SEL[dtbclk_inst], 1);
-
-		dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz;
+		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+				PIPE_DTO_SRC_SEL[params->otg_inst], 1);
 	} else {
-		REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-				DTBCLK_DTO_ENABLE[dtbclk_inst], 0,
-				PIPE_DTO_SRC_SEL[dtbclk_inst], 0,
-				DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+		REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+				PIPE_DTO_SRC_SEL[params->otg_inst], 0,
+				DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
 
-		REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0);
-		REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0);
-
-		dccg->dtbclk_khz[dtbclk_inst] = 0;
+		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
 	}
 }

@@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto(
 
 		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
 				DCCG_AUDIO_DTO_SEL, 4);  //  04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK
-
-		dccg->audio_dtbclk_khz = req_audio_dtbclk_khz;
 	} else {
 		REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0);
 		REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0);
 
 		REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
 				DCCG_AUDIO_DTO_SEL, 3);  //  03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO
-
-		dccg->audio_dtbclk_khz = 0;
 	}
 }

@@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc)
 	AUX_RX_PHASE_DETECT_LEN,  [21,20] = 0x3 default is 3
 	AUX_RX_DETECTION_THRESHOLD [30:28] = 1
 	*/
-	AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
-
-	AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+	// dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init
 
 	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
 	// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk

@@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
 		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
 			return true;
-#endif
 	}
 	return false;
 }

@@ -237,6 +237,7 @@ struct clk_mgr_funcs {
 			bool safe_to_lower);
 
 	int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+	int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr);
 
 	void (*set_low_power_state)(struct clk_mgr *clk_mgr);
 

@@ -60,8 +60,17 @@ struct dccg {
 	const struct dccg_funcs *funcs;
 	int pipe_dppclk_khz[MAX_PIPES];
 	int ref_dppclk;
-	int dtbclk_khz[MAX_PIPES];
-	int audio_dtbclk_khz;
+	//int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+	//int audio_dtbclk_khz;/* TODO needs to be removed */
+	int ref_dtbclk_khz;/* TODO needs to be removed */
 };
 
+struct dtbclk_dto_params {
+	const struct dc_crtc_timing *timing;
+	int otg_inst;
+	int pixclk_khz;
+	int req_audio_dtbclk_khz;
+	int num_odm_segments;
+	int ref_dtbclk_khz;
+};
+
@@ -111,10 +120,7 @@ struct dccg_funcs {
 
 	void (*set_dtbclk_dto)(
 			struct dccg *dccg,
-			int dtbclk_inst,
-			int req_dtbclk_khz,
-			int num_odm_segments,
-			const struct dc_crtc_timing *timing);
+			struct dtbclk_dto_params *dto_params);
 
 	void (*set_audio_dtbclk_dto)(
 			struct dccg *dccg,

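set_dtbclk_dto() moving from four positional arguments to a dtbclk_dto_params struct is the usual parameter-object refactor: callers zero-initialize, fill only what they need, and new fields can be added later without touching every implementation. Minimal shape of the pattern (stubbed, not the DC code):

#include <string.h>

struct dto_params {
	int otg_inst;
	int pixclk_khz;
	int num_odm_segments;
	int ref_dtbclk_khz;
};

static void set_dto(const struct dto_params *p)
{
	/* program the DTO from p->...; unset fields simply read as 0 */
	(void)p;
}

int main(void)
{
	struct dto_params dto_params = {0};	/* like the driver's {0} init */

	dto_params.otg_inst = 0;
	dto_params.pixclk_khz = 594000;		/* hypothetical 4K pixel clock */
	set_dto(&dto_params);
	return 0;
}
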
@@ -27,6 +27,7 @@
 #include "core_types.h"
 #include "dccg.h"
 #include "dc_link_dp.h"
+#include "clk_mgr.h"
 
 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
 {

@@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
 	struct dccg *dccg = dc->res_pool->dccg;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
-	int odm_segment_count = get_odm_segment_count(pipe_ctx);
+	struct dtbclk_dto_params dto_params = {0};
 	enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
 
+	dto_params.otg_inst = tg->inst;
+	dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+	dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+	dto_params.timing = &pipe_ctx->stream->timing;
+	dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+
 	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst);
 	dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
-	dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk,
-			odm_segment_count,
-			&pipe_ctx->stream->timing);
+	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	stream_enc->funcs->enable_stream(stream_enc);
 	stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
 }

@@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
 	struct dccg *dccg = dc->res_pool->dccg;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
+	struct dtbclk_dto_params dto_params = {0};
+
+	dto_params.otg_inst = tg->inst;
+	dto_params.timing = &pipe_ctx->stream->timing;
 
 	stream_enc->funcs->disable(stream_enc);
-	dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing);
+	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
 	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
 }

@@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
 {
 	union dmub_gpint_data_register cmd;
 	const uint32_t timeout = 100;
-	uint32_t in_reset, scratch, i;
+	uint32_t in_reset, scratch, i, pwait_mode;
 
 	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
 

@@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
 			udelay(1);
 		}
 
+		for (i = 0; i < timeout; ++i) {
+			REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+			if (pwait_mode & (1 << 0))
+				break;
+
+			udelay(1);
+		}
 		/* Force reset in case we timed out, DMCUB is likely hung. */
 	}
 

@@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
 	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
 	REG_WRITE(DMCUB_SCRATCH0, 0);
 
 	/* Clear the GPINT command manually so we don't send anything during boot. */

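The added wait is a bounded poll on a status bit before the reset proceeds. Its generic shape, with the register read and udelay() stubbed out:

#include <stdbool.h>
#include <stdint.h>

static bool poll_bit(uint32_t (*read_status)(void), uint32_t mask,
		     unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; ++i) {
		if (read_status() & mask)
			return true;	/* bit set: the state was reached */
		/* udelay(1) equivalent would go here */
	}
	return false;	/* timed out; the caller force-resets the DMCUB */
}
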
@@ -151,7 +151,8 @@ struct dmub_srv;
     DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
     DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
     DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
-    DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+    DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+    DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)

 struct dmub_srv_dcn31_reg_offset {
 #define DMUB_SR(reg) uint32_t reg;
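The DMUB_SF() entries form an X-macro list: a single table of (register, field) pairs expands into different code depending on how the macro is defined at the expansion site, which is why the fix is just a trailing backslash plus one new entry. A condensed illustration of the idiom (simplified; not the actual dmub_srv structures):

    #include <stdint.h>
    #include <stdio.h>

    /* One list, many expansions; the trailing backslashes are
     * load-bearing, which is why the previous last entry gains one
     * in the hunk above. */
    #define REG_FIELD_LIST(SF)      \
        SF(CNTL, SOFT_RESET)        \
        SF(CNTL, PWAIT_MODE_STATUS)

    /* Expansion: one shift member per (register, field) pair. */
    #define SF_DECL(reg, field) uint8_t reg##_##field##_shift;
    struct field_shifts {
        REG_FIELD_LIST(SF_DECL)
    };
    #undef SF_DECL

    int main(void)
    {
        printf("%zu fields\n", sizeof(struct field_shifts));
        return 0;
    }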
@@ -127,6 +127,8 @@ struct av_sync_data {
 static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
 static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};

 static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";

+/*MST Dock*/
+static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
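Device ID tables like these are compared byte-for-byte against what a DisplayPort branch device reports in its DPCD, gating vendor-specific workarounds. A hedged sketch of such a check (the helper name and the four-byte match length are illustrative assumptions, not the driver's code):

    #include <stdint.h>
    #include <string.h>

    static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";

    /* Hypothetical check: match the first four bytes of the
     * branch-device ID string read from the sink's DPCD. */
    static int is_synaptics_branch(const uint8_t *dev_id)
    {
        return memcmp(dev_id, SYNAPTICS_DEVICE_ID, 4) == 0;
    }

    int main(void)
    {
        const uint8_t dpcd_id[6] = "SYNA";

        return is_synaptics_branch(dpcd_id) ? 0 : 1;
    }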
@@ -22,6 +22,7 @@
 #ifndef SMU_11_0_7_PPTABLE_H
 #define SMU_11_0_7_PPTABLE_H

+#pragma pack(push, 1)

 #define SMU_11_0_7_TABLE_FORMAT_REVISION 15

@@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table
     uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings
     uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings
     int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings
-} __attribute__((packed));
+};

 enum SMU_11_0_7_PPCLOCK_ID {
     SMU_11_0_7_PPCLOCK_GFXCLK = 0,

@@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table
     uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT
     uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
     uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};

 struct smu_11_0_7_powerplay_table
 {

@@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table
     struct smu_11_0_7_overdrive_table overdrive_table;

     PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
-} __attribute__((packed));
+};
+
+#pragma pack(pop)

 #endif
@@ -22,6 +22,7 @@
 #ifndef SMU_11_0_PPTABLE_H
 #define SMU_11_0_PPTABLE_H

+#pragma pack(push, 1)

 #define SMU_11_0_TABLE_FORMAT_REVISION 12

@@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table
     uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags
     uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings
     uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings
-} __attribute__((packed));
+};

 enum SMU_11_0_PPCLOCK_ID {
     SMU_11_0_PPCLOCK_GFXCLK = 0,

@@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table
     uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
     uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
     uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};

 struct smu_11_0_powerplay_table
 {

@@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table
 #ifndef SMU_11_0_PARTIAL_PPTABLE
     PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
 #endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)

 #endif
@@ -22,6 +22,8 @@
 #ifndef SMU_13_0_7_PPTABLE_H
 #define SMU_13_0_7_PPTABLE_H

+#pragma pack(push, 1)
+
 #define SMU_13_0_7_TABLE_FORMAT_REVISION 15

 //// POWERPLAYTABLE::ulPlatformCaps

@@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table
     struct smu_13_0_7_overdrive_table overdrive_table;
     uint8_t padding1;
     PPTable_t smc_pptable; //PPTable_t in driver_if.h
-} __attribute__((packed));
+};

+#pragma pack(pop)

 #endif
@@ -22,6 +22,8 @@
 #ifndef SMU_13_0_PPTABLE_H
 #define SMU_13_0_PPTABLE_H

+#pragma pack(push, 1)
+
 #define SMU_13_0_TABLE_FORMAT_REVISION 1

 //// POWERPLAYTABLE::ulPlatformCaps

@@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table {
     uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags
     uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings
     uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings
-} __attribute__((packed));
+};

 enum SMU_13_0_PPCLOCK_ID {
     SMU_13_0_PPCLOCK_GFXCLK = 0,

@@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table {
     uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
     uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
     uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};

 struct smu_13_0_powerplay_table {
     struct atom_common_table_header header;

@@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table {
 #ifndef SMU_13_0_PARTIAL_PPTABLE
     PPTable_t smc_pptable; //PPTable_t in driver_if.h
 #endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)

 #endif
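All four pptable headers above make the same substitution: the per-struct __attribute__((packed)) annotations give way to a single #pragma pack(push, 1) / #pragma pack(pop) region around the whole file. Both spellings produce the identical byte-packed layout the firmware expects; the pragma form is the one that avoids the unaligned-access warnings this series suppresses. A self-contained comparison of the two forms:

    #include <stdint.h>
    #include <stdio.h>

    /* Attribute form: packed per struct. */
    struct od_table_attr {
        uint8_t  cap;
        uint32_t max;   /* lands at offset 1 */
    } __attribute__((packed));

    /* Pragma form: everything between push and pop is 1-byte packed. */
    #pragma pack(push, 1)
    struct od_table_pragma {
        uint8_t  cap;
        uint32_t max;   /* also at offset 1 */
    };
    #pragma pack(pop)

    int main(void)
    {
        /* Both are 5 bytes: identical layout, different spelling. */
        printf("%zu %zu\n", sizeof(struct od_table_attr),
               sizeof(struct od_table_pragma));
        return 0;
    }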
@@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
         }

         if (bDPExecute)
-            ast->tx_chip_type = AST_TX_ASTDP;
+            ast->tx_chip_types |= BIT(AST_TX_ASTDP);

         ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
                                (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
                                ASTDP_HOST_EDID_READ_DONE);
-    } else
-        ast->tx_chip_type = AST_TX_NONE;
+    }
 }
@@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev)
         ast_init_dvo(dev);
         break;
     default:
-        if (ast->tx_chip_type == AST_TX_SIL164)
+        if (ast->tx_chip_types & BIT(AST_TX_SIL164))
             ast_init_dvo(dev);
         else
             ast_init_analog(dev);
@@ -73,6 +73,11 @@ enum ast_tx_chip {
     AST_TX_ASTDP,
 };

+#define AST_TX_NONE_BIT     BIT(AST_TX_NONE)
+#define AST_TX_SIL164_BIT   BIT(AST_TX_SIL164)
+#define AST_TX_DP501_BIT    BIT(AST_TX_DP501)
+#define AST_TX_ASTDP_BIT    BIT(AST_TX_ASTDP)
+
 #define AST_DRAM_512Mx16    0
 #define AST_DRAM_1Gx16      1
 #define AST_DRAM_512Mx32    2

@@ -173,7 +178,7 @@ struct ast_private {
     struct drm_plane primary_plane;
     struct ast_cursor_plane cursor_plane;
     struct drm_crtc crtc;
-    union {
+    struct {
         struct {
             struct drm_encoder encoder;
             struct ast_vga_connector vga_connector;

@@ -199,7 +204,7 @@ struct ast_private {
         ast_use_defaults
     } config_mode;

-    enum ast_tx_chip tx_chip_type;
+    unsigned long tx_chip_types;    /* bitfield of enum ast_chip_type */
     u8 *dp501_fw_addr;
     const struct firmware *dp501_fw;    /* dp501 fw */
 };
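This is the core of the multi-output rework: tx_chip_type, a single enum value, becomes tx_chip_types, a bitfield, so detection can record several transmitters on one board and every consumer switches from == to &. Note the matching container change from union to struct, so the per-output encoder/connector pairs can coexist. The bitfield pattern in miniature (BIT() is redefined locally for a userspace build):

    #include <stdio.h>

    #define BIT(n) (1ul << (n))

    enum ast_tx_chip {
        AST_TX_NONE,
        AST_TX_SIL164,
        AST_TX_DP501,
        AST_TX_ASTDP,
    };

    int main(void)
    {
        unsigned long tx_chip_types = 0;

        /* Detection can now record more than one output. */
        tx_chip_types |= BIT(AST_TX_NONE);   /* analog VGA */
        tx_chip_types |= BIT(AST_TX_ASTDP);  /* ASPEED DisplayPort */

        /* Consumers test membership instead of equality. */
        if (tx_chip_types & BIT(AST_TX_ASTDP))
            printf("init DisplayPort output\n");
        if (tx_chip_types & BIT(AST_TX_SIL164))
            printf("init SIL164 output\n");  /* not reached here */
        return 0;
    }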
@@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
     }

     /* Check 3rd Tx option (digital output afaik) */
-    ast->tx_chip_type = AST_TX_NONE;
+    ast->tx_chip_types |= AST_TX_NONE_BIT;

     /*
      * VGACRA3 Enhanced Color Mode Register, check if DVO is already

@@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
     if (!*need_post) {
         jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
         if (jreg & 0x80)
-            ast->tx_chip_type = AST_TX_SIL164;
+            ast->tx_chip_types = AST_TX_SIL164_BIT;
     }

     if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) {

@@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
         jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
         switch (jreg) {
         case 0x04:
-            ast->tx_chip_type = AST_TX_SIL164;
+            ast->tx_chip_types = AST_TX_SIL164_BIT;
             break;
         case 0x08:
             ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);

@@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
             }
             fallthrough;
         case 0x0c:
-            ast->tx_chip_type = AST_TX_DP501;
+            ast->tx_chip_types = AST_TX_DP501_BIT;
         }
     } else if (ast->chip == AST2600)
         ast_dp_launch(&ast->base, 0);

     /* Print stuff for diagnostic purposes */
-    switch(ast->tx_chip_type) {
-    case AST_TX_SIL164:
+    if (ast->tx_chip_types & AST_TX_NONE_BIT)
+        drm_info(dev, "Using analog VGA\n");
+    if (ast->tx_chip_types & AST_TX_SIL164_BIT)
         drm_info(dev, "Using Sil164 TMDS transmitter\n");
-        break;
-    case AST_TX_DP501:
+    if (ast->tx_chip_types & AST_TX_DP501_BIT)
         drm_info(dev, "Using DP501 DisplayPort transmitter\n");
-        break;
-    default:
-        drm_info(dev, "Analog VGA only\n");
-    }

     return 0;
 }
@@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
     case DRM_MODE_DPMS_ON:
         ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0);
         ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0);
-        if (ast->tx_chip_type == AST_TX_DP501)
+        if (ast->tx_chip_types & AST_TX_DP501_BIT)
            ast_set_dp501_video_output(crtc->dev, 1);

-        if (ast->tx_chip_type == AST_TX_ASTDP) {
+        if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
            ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON);
            ast_wait_for_vretrace(ast);
            ast_dp_set_on_off(crtc->dev, 1);

@@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
     case DRM_MODE_DPMS_SUSPEND:
     case DRM_MODE_DPMS_OFF:
         ch = mode;
-        if (ast->tx_chip_type == AST_TX_DP501)
+        if (ast->tx_chip_types & AST_TX_DP501_BIT)
            ast_set_dp501_video_output(crtc->dev, 0);
         break;

-        if (ast->tx_chip_type == AST_TX_ASTDP) {
+        if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
            ast_dp_set_on_off(crtc->dev, 0);
            ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF);
         }

         ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);
         ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch);
         break;
     }
 }

@@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
         ast_crtc_load_lut(ast, crtc);

     //Set Aspeed Display-Port
-    if (ast->tx_chip_type == AST_TX_ASTDP)
+    if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
         ast_dp_set_mode(crtc, vbios_mode_info);

     mutex_unlock(&ast->ioregs_lock);

@@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast)

     ast_crtc_init(dev);

-    switch (ast->tx_chip_type) {
-    case AST_TX_NONE:
+    if (ast->tx_chip_types & AST_TX_NONE_BIT) {
         ret = ast_vga_output_init(ast);
-        break;
-    case AST_TX_SIL164:
-        ret = ast_sil164_output_init(ast);
-        break;
-    case AST_TX_DP501:
-        ret = ast_dp501_output_init(ast);
-        break;
-    case AST_TX_ASTDP:
-        ret = ast_astdp_output_init(ast);
-        break;
+        if (ret)
+            return ret;
     }
+    if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
+        ret = ast_sil164_output_init(ast);
+        if (ret)
+            return ret;
+    }
+    if (ast->tx_chip_types & AST_TX_DP501_BIT) {
+        ret = ast_dp501_output_init(ast);
+        if (ret)
+            return ret;
+    }
+    if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
+        ret = ast_astdp_output_init(ast);
+        if (ret)
+            return ret;
+    }
-    if (ret)
-        return ret;

     drm_mode_config_reset(dev);
@@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev)

         ast_init_3rdtx(dev);
     } else {
-        if (ast->tx_chip_type != AST_TX_NONE)
+        if (ast->tx_chip_types & AST_TX_SIL164_BIT)
             ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);    /* Enable DVO */
     }
 }
@@ -1266,6 +1266,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
     return 0;
 }

+static
+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
+                                          struct drm_atomic_state *state)
+{
+    struct drm_encoder *encoder = dp->encoder;
+    struct drm_connector *connector;
+    struct drm_connector_state *conn_state;
+
+    connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+    if (!connector)
+        return NULL;
+
+    conn_state = drm_atomic_get_old_connector_state(state, connector);
+    if (!conn_state)
+        return NULL;
+
+    return conn_state->crtc;
+}
+
 static
 struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
                                           struct drm_atomic_state *state)

@@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
 {
     struct drm_atomic_state *old_state = old_bridge_state->base.state;
     struct analogix_dp_device *dp = bridge->driver_private;
-    struct drm_crtc *crtc;
+    struct drm_crtc *old_crtc, *new_crtc;
+    struct drm_crtc_state *old_crtc_state = NULL;
     struct drm_crtc_state *new_crtc_state = NULL;
     int ret;

-    crtc = analogix_dp_get_new_crtc(dp, old_state);
-    if (!crtc)
+    new_crtc = analogix_dp_get_new_crtc(dp, old_state);
+    if (!new_crtc)
         goto out;

-    new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
+    new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
     if (!new_crtc_state)
         goto out;

@@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
         return;

 out:
+    old_crtc = analogix_dp_get_old_crtc(dp, old_state);
+    if (old_crtc) {
+        old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+                                                       old_crtc);
+
+        /* When moving from PSR to fully disabled, exit PSR first. */
+        if (old_crtc_state && old_crtc_state->self_refresh_active) {
+            ret = analogix_dp_disable_psr(dp);
+            if (ret)
+                DRM_ERROR("Failed to disable psr (%d)\n", ret);
+        }
+    }
+
     analogix_dp_bridge_disable(bridge);
 }
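analogix_dp_get_old_crtc() mirrors the existing _get_new_crtc() helper but walks the old atomic state, encoder to old connector to old connector state to CRTC, so the disable path can tell whether the display was in self-refresh before this commit and exit PSR first. A toy model of that NULL-propagating walk (stub types for illustration only, not the drm structures):

    #include <stddef.h>
    #include <stdio.h>

    /* Each hop may return NULL, and the caller treats NULL as
     * "nothing was enabled before this commit". */
    struct crtc { int id; };
    struct conn_state { struct crtc *crtc; };
    struct connector { struct conn_state old_state; };
    struct encoder { struct connector *old_connector; };

    static struct crtc *get_old_crtc(struct encoder *enc)
    {
        struct connector *connector = enc->old_connector;

        if (!connector)
            return NULL;
        return connector->old_state.crtc;
    }

    int main(void)
    {
        struct crtc c = { .id = 0 };
        struct connector conn = { .old_state = { .crtc = &c } };
        struct encoder enc = { .old_connector = &conn };
        struct crtc *old = get_old_crtc(&enc);

        printf("old crtc: %d\n", old ? old->id : -1);
        return 0;
    }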
@@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
     ctx->host_node = of_graph_get_remote_port_parent(endpoint);
     of_node_put(endpoint);

-    if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
+    if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) {
         ret = -EINVAL;
         goto err_put_node;
     }
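The sn65dsi83 fix is a single comparison: dsi_lanes is derived from the data-lanes DT property, so a missing or empty property can yield 0, which the old "< 0" check wrongly accepted as a valid configuration. The corrected bound, in a trivially testable form (a sketch; the DT parsing is elided):

    #include <stdio.h>

    /* 0 lanes (empty or missing data-lanes) is now rejected too. */
    static int validate_dsi_lanes(int dsi_lanes)
    {
        if (dsi_lanes <= 0 || dsi_lanes > 4)
            return -1;  /* -EINVAL in the driver */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", validate_dsi_lanes(0),
               validate_dsi_lanes(4), validate_dsi_lanes(5));
        return 0;
    }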
@@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
         return drm_atomic_crtc_effectively_active(old_state);

     /*
-     * We need to run through the crtc_funcs->disable() function if the CRTC
-     * is currently on, if it's transitioning to self refresh mode, or if
-     * it's in self refresh mode and needs to be fully disabled.
+     * We need to disable bridge(s) and CRTC if we're transitioning out of
+     * self-refresh and changing CRTCs at the same time, because the
+     * bridge tracks self-refresh status via CRTC state.
      */
+    if (old_state->self_refresh_active &&
+        old_state->crtc != new_state->crtc)
+        return true;
+
+    /*
+     * We also need to run through the crtc_funcs->disable() function if
+     * the CRTC is currently on, if it's transitioning to self refresh
+     * mode, or if it's in self refresh mode and needs to be fully
+     * disabled.
+     */
     return old_state->active ||
            (old_state->self_refresh_active && !new_state->active) ||
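After this change, crtc_needs_disable() has two independent reasons to return true. The same predicate over plain booleans, with the CRTC pointer comparison reduced to a flag (a simplification of the kernel helper; its full return expression is truncated in the hunk above and is elided here too):

    #include <stdbool.h>
    #include <stdio.h>

    struct crtc_state_bits {
        bool active;
        bool self_refresh_active;
    };

    /* moved_crtc stands in for old_state->crtc != new_state->crtc */
    static bool crtc_needs_disable(struct crtc_state_bits old_st,
                                   struct crtc_state_bits new_st,
                                   bool moved_crtc)
    {
        /* Leaving self-refresh while switching CRTCs must take the full
         * disable path: the bridge tracks self-refresh via CRTC state. */
        if (old_st.self_refresh_active && moved_crtc)
            return true;

        /* On, or leaving self-refresh for full off. (The kernel helper
         * ORs in further terms that the hunk above truncates.) */
        return old_st.active ||
               (old_st.self_refresh_active && !new_st.active);
    }

    int main(void)
    {
        struct crtc_state_bits old_st = { .self_refresh_active = true };
        struct crtc_state_bits new_st = { .active = true };

        printf("%d\n", crtc_needs_disable(old_st, new_st, true)); /* 1 */
        return 0;
    }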
@@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
         struct drm_file *file)
 {
     struct panfrost_device *pfdev = dev->dev_private;
+    struct panfrost_file_priv *file_priv = file->driver_priv;
     struct drm_panfrost_submit *args = data;
     struct drm_syncobj *sync_out = NULL;
     struct panfrost_job *job;

@@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
     job->jc = args->jc;
     job->requirements = args->requirements;
     job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
-    job->file_priv = file->driver_priv;
+    job->mmu = file_priv->mmu;

     slot = panfrost_job_get_slot(job);

     ret = drm_sched_job_init(&job->base,
-                             &job->file_priv->sched_entity[slot],
+                             &file_priv->sched_entity[slot],
                              NULL);
     if (ret)
         goto out_put_job;

@@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
         return;
     }

-    cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
+    cfg = panfrost_mmu_as_get(pfdev, job->mmu);

     job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
     job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

@@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
         job->jc = 0;
     }

-    panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+    panfrost_mmu_as_put(pfdev, job->mmu);
     panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

     if (signal_fence)

@@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
      * happen when we receive the DONE interrupt while doing a GPU reset).
      */
     job->jc = 0;
-    panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+    panfrost_mmu_as_put(pfdev, job->mmu);
     panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

     dma_fence_signal_locked(job->done_fence);

@@ -17,7 +17,7 @@ struct panfrost_job {
     struct kref refcount;

     struct panfrost_device *pfdev;
-    struct panfrost_file_priv *file_priv;
+    struct panfrost_mmu *mmu;

     /* Fence to be signaled by IRQ handler when the job is complete. */
     struct dma_fence *done_fence;
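The panfrost hunks are a lifetime fix, the use-after-free called out in the pull summary: jobs used to dereference job->file_priv->mmu at submit and teardown time, but the file_priv can be freed while a job is still in flight. Caching the MMU pointer on the job itself removes the dangling hop. The shape of the bug and the fix, reduced to a standalone sketch (refcounting omitted; in the real driver the MMU object outlives the job by other means):

    #include <stdlib.h>

    struct mmu_ctx { int as; };           /* address-space handle */

    struct file_ctx {                     /* freed when the client exits */
        struct mmu_ctx *mmu;
    };

    struct job {
        struct mmu_ctx *mmu;              /* cached: no file_ctx chase */
    };

    static struct job *submit(struct file_ctx *fctx)
    {
        struct job *job = calloc(1, sizeof(*job));

        if (!job)
            return NULL;
        job->mmu = fctx->mmu;  /* was: job->fctx = fctx, a dangling risk */
        return job;
    }

    static int job_finish(struct job *job)
    {
        int as = job->mmu->as;  /* safe even after the client is gone */

        free(job);
        return as;
    }

    int main(void)
    {
        struct mmu_ctx mmu = { .as = 3 };
        struct file_ctx *fctx = calloc(1, sizeof(*fctx));
        struct job *job;

        if (!fctx)
            return 1;
        fctx->mmu = &mmu;
        job = submit(fctx);
        free(fctx);         /* client exits while the job is in flight */

        return (job && job_finish(job) == 3) ? 0 : 1;
    }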