Merge tag 'amd-drm-next-5.18-2022-03-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.18-2022-03-18:

amdgpu:
- Aldebaran fixes
- SMU 13.0.5 fixes
- DCN 3.1.5 fixes
- DCN 3.1.6 fixes
- Pipe split fixes
- More display FP cleanup
- DP 2.0 UHBR fix
- DC GPU reset fix
- DC deep color ratio fix
- SMU robustness fixes
- Runtime PM fix for APUs
- IGT reload fixes
- SR-IOV fix
- Misc fixes and cleanups

amdkfd:
- CRIU fixes
- SVM fixes

UAPI:
- Properly handle SDMA transfers with CRIU
  Proposed user mode change: https://github.com/checkpoint-restore/criu/pull/1709

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220318203717.5833-1-alexander.deucher@amd.com
Dave Airlie 2022-03-21 13:48:19 +10:00
commit c6e90a1c66
117 changed files with 3943 additions and 2943 deletions


@ -53,11 +53,11 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \
amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o


@ -626,7 +626,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
memcpy(native_mode, mode, sizeof(*mode));
drm_mode_copy(native_mode, mode);
}
}
@ -635,7 +635,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
*native_mode = *mode;
drm_mode_copy(native_mode, mode);
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
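
Why drm_mode_copy() here: struct drm_display_mode embeds a list_head that links the mode into the connector's probed-mode list, so a memcpy()/struct assignment clobbers that linkage. A minimal standalone sketch of the idea — the types below are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Simplified stand-ins; the real types live in <linux/list.h> and
 * <drm/drm_modes.h>. */
struct list_head { struct list_head *prev, *next; };

struct mode {
	struct list_head head;	/* links the mode into a mode list */
	int hdisplay, vdisplay;
};

/* Mirrors the idea behind drm_mode_copy(): copy the timing payload but
 * preserve the destination's list linkage. */
static void mode_copy(struct mode *dst, const struct mode *src)
{
	struct list_head head = dst->head;	/* save linkage */
	*dst = *src;				/* copy everything */
	dst->head = head;			/* restore linkage */
}

int main(void)
{
	struct mode native = { .head = { &native.head, &native.head },
			       .hdisplay = 1024, .vdisplay = 768 };
	struct mode probed = { .head = { NULL, NULL },
			       .hdisplay = 1920, .vdisplay = 1080 };

	/* A plain "native = probed;" would overwrite native.head with
	 * probed's NULL pointers and corrupt the list native sat on. */
	mode_copy(&native, &probed);
	printf("%dx%d, linkage intact: %d\n", native.hdisplay,
	       native.vdisplay, native.head.next == &native.head);
	return 0;
}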


@ -340,7 +340,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
s64 min_us;
/* Be more aggresive on dGPUs. Try to fill a portion of free
/* Be more aggressive on dGPUs. Try to fill a portion of free
* VRAM now.
*/
if (!(adev->flags & AMD_IS_APU))
@ -1280,7 +1280,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
continue;
/*
* Work around dma_resv shortcommings by wrapping up the
* Work around dma_resv shortcomings by wrapping up the
* submission in a dma_fence_chain and add it as exclusive
* fence.
*/


@ -2159,8 +2159,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
!pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
adev->flags |= AMD_IS_PX;
parent = pci_upstream_bridge(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
if (!(adev->flags & AMD_IS_APU)) {
parent = pci_upstream_bridge(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
amdgpu_amdkfd_device_probe(adev);
@ -3664,6 +3666,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;
/*
* Reset domain needs to be present early, before XGMI hive discovered
* (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init and before calling RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
if (!adev->reset_domain)
return -ENOMEM;
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@ -3673,15 +3684,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
/*
* Reset domain needs to be present early, before XGMI hive discovered
* (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE ,"amdgpu-reset-dev");
if (!adev->reset_domain)
return -ENOMEM;
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)


@ -622,6 +622,13 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
unsigned size;
/*
* Some ASICs need to reserve a region of video memory to avoid access
* from driver
*/
adev->mman.stolen_reserved_offset = 0;
adev->mman.stolen_reserved_size = 0;
/*
* TODO:
* Currently there is a bug where some memory client outside
@ -632,10 +639,25 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
*/
switch (adev->asic_type) {
case CHIP_VEGA10:
adev->mman.keep_stolen_vga_memory = true;
/*
* VEGA10 SRIOV VF needs some firmware reserved area.
*/
if (amdgpu_sriov_vf(adev)) {
adev->mman.stolen_reserved_offset = 0x100000;
adev->mman.stolen_reserved_size = 0x600000;
}
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
adev->mman.keep_stolen_vga_memory = true;
break;
case CHIP_YELLOW_CARP:
if (amdgpu_discovery == 0) {
adev->mman.stolen_reserved_offset = 0x1ffb0000;
adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
}
break;
default:
adev->mman.keep_stolen_vga_memory = false;
break;
@ -756,25 +778,6 @@ uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo
return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}
void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
{
/* Some ASICs need to reserve a region of video memory to avoid access
* from driver */
adev->mman.stolen_reserved_offset = 0;
adev->mman.stolen_reserved_size = 0;
switch (adev->asic_type) {
case CHIP_YELLOW_CARP:
if (amdgpu_discovery == 0) {
adev->mman.stolen_reserved_offset = 0x1ffb0000;
adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
}
break;
default:
break;
}
}
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
struct amdgpu_bo *vram_bo = NULL;


@ -331,7 +331,6 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
bool enable);
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);


@ -166,8 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
(ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
dev_err(adev->dev, "secure submissions not supported on compute rings\n");
(!ring->funcs->secure_submission_supported)) {
dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
return -EINVAL;
}


@ -310,6 +310,10 @@ static int psp_sw_init(void *handle)
return ret;
}
adev->psp.xgmi_context.supports_extended_data =
!adev->gmc.xgmi.connected_to_cpu &&
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
if (psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
@ -3008,7 +3012,6 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr->sos.offset_bytes);
adev->psp.xgmi_context.supports_extended_data = false;
} else {
/* Load alternate PSP SOS FW */
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
@ -3023,7 +3026,6 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
adev->psp.xgmi_context.supports_extended_data = true;
}
if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {


@ -2068,6 +2068,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
@ -2092,6 +2093,11 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
goto free;
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
con->update_channel_flag = false;
}
}
#ifdef CONFIG_X86_MCE_AMD
@ -2285,6 +2291,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
con->update_channel_flag = false;
con->features = 0;
INIT_LIST_HEAD(&con->head);
/* Might need get this flag from vbios. */


@ -374,6 +374,9 @@ struct amdgpu_ras {
/* record umc error info queried from smu */
struct umc_ecc_info umc_ecc;
/* Indicates whether SMU needs to update bad channel info */
bool update_channel_flag;
};
struct ras_fs_data {


@ -267,6 +267,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
u8 csum;
int res;
@ -287,6 +288,10 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
control->bad_channel_bitmap = 0;
amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
con->update_channel_flag = false;
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
@ -420,6 +425,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
const u32 num)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
u32 a, b, i;
u8 *buf, *pp;
int res;
@ -431,9 +437,16 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
/* Encode all of them in one go.
*/
pp = buf;
for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
__encode_table_record_to_buf(control, &record[i], pp);
/* update bad channel bitmap */
if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
control->bad_channel_bitmap |= 1 << record[i].mem_channel;
con->update_channel_flag = true;
}
}
/* a, first record index to write into.
* b, last record index to write into.
* a = first index to read (fri) + number of records in the table,
@ -686,6 +699,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int i, res;
u8 *buf, *pp;
u32 g0, g1;
@ -753,8 +767,15 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
/* Read up everything? Then transform.
*/
pp = buf;
for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
__decode_table_record_from_buf(control, &record[i], pp);
/* update bad channel bitmap */
if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
control->bad_channel_bitmap |= 1 << record[i].mem_channel;
con->update_channel_flag = true;
}
}
Out:
kfree(buf);
mutex_unlock(&control->ras_tbl_mutex);
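
The bad-channel bookkeeping added across these hunks follows a single idiom: set the channel's bit in bad_channel_bitmap, and raise update_channel_flag only when the bit is newly set, so amdgpu_dpm_send_hbm_bad_channel_flag() fires at most once per newly bad channel. A small standalone sketch of that idiom — the names mirror the diff, but the types are simplified stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in; the real fields live in
 * struct amdgpu_ras_eeprom_control and struct amdgpu_ras. */
struct ras_state {
	uint32_t bad_channel_bitmap;	/* one bit per memory channel */
	bool update_channel_flag;	/* set when the bitmap gained a bit */
};

static void note_bad_channel(struct ras_state *st, unsigned int channel)
{
	/* Flag an update only the first time a channel goes bad, exactly
	 * like the append/read loops above. */
	if (!(st->bad_channel_bitmap & (1u << channel))) {
		st->bad_channel_bitmap |= 1u << channel;
		st->update_channel_flag = true;
	}
}

int main(void)
{
	struct ras_state st = { 0 };

	note_bad_channel(&st, 3);
	note_bad_channel(&st, 3);	/* duplicate: no new update */
	note_bad_channel(&st, 7);

	if (st.update_channel_flag) {
		/* stand-in for amdgpu_dpm_send_hbm_bad_channel_flag() */
		printf("flush bitmap 0x%x to SMU\n", st.bad_channel_bitmap);
		st.update_channel_flag = false;
	}
	return 0;
}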


@ -80,6 +80,10 @@ struct amdgpu_ras_eeprom_control {
/* Protect table access via this mutex.
*/
struct mutex ras_tbl_mutex;
/* Record which channels have had bad pages
*/
u32 bad_channel_bitmap;
};
/*


@ -155,6 +155,7 @@ struct amdgpu_ring_funcs {
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
unsigned vmhub;
unsigned extra_dw;


@ -97,6 +97,11 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_ras_save_bad_pages(adev);
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
con->update_channel_flag = false;
}
}
if (reset)


@ -34,7 +34,6 @@
#include "amdgpu_reset.h"
#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
@ -69,17 +68,6 @@ static const int wafl_pcs_err_status_reg_arct[] = {
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};
static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI23_PCS_ERROR_STATUS,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
};
static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
@ -797,9 +785,6 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
xgmi_pcs_err_status_reg_vg20[i]);
break;
case CHIP_ALDEBARAN:
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
xgmi23_pcs_err_status_reg_aldebaran[i]);
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_aldebaran[i]);
@ -900,13 +885,6 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
}
break;
case CHIP_ALDEBARAN:
/* check xgmi23 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, true);
}
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);


@ -9377,6 +9377,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
.get_wptr = gfx_v10_0_ring_get_wptr_gfx,


@ -6865,6 +6865,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,


@ -948,7 +948,6 @@ static int gmc_v10_0_sw_init(void *handle)
return r;
amdgpu_gmc_get_vbios_allocations(adev);
amdgpu_gmc_get_reserved_allocation(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);


@ -362,9 +362,24 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
uint32_t baco_cntl;
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&
!amdgpu_sriov_vf(adev)) {
baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
if (baco_cntl &
(BACO_CNTL__BACO_DUMMY_EN_MASK | BACO_CNTL__BACO_EN_MASK)) {
baco_cntl &= ~(BACO_CNTL__BACO_DUMMY_EN_MASK |
BACO_CNTL__BACO_EN_MASK);
dev_dbg(adev->dev, "Unsetting baco dummy mode %x",
baco_cntl);
WREG32_SOC15(NBIO, 0, mmBACO_CNTL, baco_cntl);
}
}
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)


@ -1142,6 +1142,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
.secure_submission_supported = true,
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,


@ -1580,6 +1580,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
.secure_submission_supported = true,
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,


@ -2414,6 +2414,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
@ -2450,6 +2451,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
@ -2482,6 +2484,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
@ -2514,6 +2517,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,


@ -1690,6 +1690,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_0_ring_get_rptr,
.get_wptr = sdma_v5_0_ring_get_wptr,


@ -1687,6 +1687,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_2_ring_get_rptr,
.get_wptr = sdma_v5_2_ring_get_wptr,


@ -853,6 +853,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
/* CP hangs in IGT reloading test on RN, reset to WA */
if (adev->asic_type == CHIP_RENOIR)
return true;
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/


@ -1102,13 +1102,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
r = vcn_v1_0_start_dpg_mode(adev);
else
r = vcn_v1_0_start_spg_mode(adev);
return r;
return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}
/**
@ -1910,6 +1905,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,


@ -1098,8 +1098,10 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
vcn_v2_0_pause_dpg_mode(adev, 0, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@ -2007,6 +2009,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_dec_ring_get_rptr,
.get_wptr = vcn_v2_0_dec_ring_get_wptr,


@ -1515,6 +1515,7 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,
@ -1545,6 +1546,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,


@ -1786,6 +1786,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0x3f,
.nop = VCN_DEC_SW_CMD_NO_OP,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
@ -1944,6 +1945,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,


@ -1759,14 +1759,18 @@ static int criu_checkpoint_bos(struct kfd_process *p,
goto exit;
}
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(&dumper_bo->tbo.base,
bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
&bo_bucket->dmabuf_fd);
if (ret)
goto exit;
} else {
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
KFD_MMAP_GPU_ID(pdd->dev->id);
@ -1812,7 +1816,8 @@ static int criu_checkpoint_bos(struct kfd_process *p,
exit:
while (ret && bo_index--) {
if (bo_buckets[bo_index].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
if (bo_buckets[bo_index].alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
close_fd(bo_buckets[bo_index].dmabuf_fd);
}
@ -2094,6 +2099,136 @@ exit:
return ret;
}
static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
struct kfd_criu_bo_bucket *bo_bucket,
struct kfd_criu_bo_priv_data *bo_priv,
struct kgd_mem **kgd_mem)
{
int idr_handle;
int ret;
const bool criu_resume = true;
u64 offset;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need remapped bus address */
if (bo_bucket->size != PAGE_SIZE) {
pr_err("Invalid page size\n");
return -EINVAL;
}
offset = pdd->dev->adev->rmmio_remap.bus_addr;
if (!offset) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
return -ENOMEM;
}
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
offset = bo_priv->user_addr;
}
/* Create the BO */
ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
bo_bucket->size, pdd->drm_priv, kgd_mem,
&offset, bo_bucket->alloc_flags, criu_resume);
if (ret) {
pr_err("Could not create the BO\n");
return ret;
}
pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
bo_bucket->size, bo_bucket->addr, offset);
/* Restore previous IDR handle */
pr_debug("Restoring old IDR handle for the BO");
idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
bo_priv->idr_handle + 1, GFP_KERNEL);
if (idr_handle < 0) {
pr_err("Could not allocate idr\n");
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
NULL);
return -ENOMEM;
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
bo_bucket->restored_offset = offset;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
}
return 0;
}
static int criu_restore_bo(struct kfd_process *p,
struct kfd_criu_bo_bucket *bo_bucket,
struct kfd_criu_bo_priv_data *bo_priv)
{
struct kfd_process_device *pdd;
struct kgd_mem *kgd_mem;
int ret;
int j;
pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
bo_priv->idr_handle);
pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
if (!pdd) {
pr_err("Failed to get pdd\n");
return -ENODEV;
}
ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
if (ret)
return ret;
/* now map these BOs to GPU/s */
for (j = 0; j < p->n_pdds; j++) {
struct kfd_dev *peer;
struct kfd_process_device *peer_pdd;
if (!bo_priv->mapped_gpuids[j])
break;
peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
if (!peer_pdd)
return -EINVAL;
peer = peer_pdd->dev;
peer_pdd = kfd_bind_process_to_device(peer, p);
if (IS_ERR(peer_pdd))
return PTR_ERR(peer_pdd);
ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, peer_pdd->drm_priv,
NULL);
if (ret) {
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
return ret;
}
}
pr_debug("map memory was successful for the BO\n");
/* create the dmabuf object and export the bo */
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
&bo_bucket->dmabuf_fd);
if (ret)
return ret;
} else {
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
}
return 0;
}
static int criu_restore_bos(struct kfd_process *p,
struct kfd_ioctl_criu_args *args,
uint64_t *priv_offset,
@ -2101,9 +2236,7 @@ static int criu_restore_bos(struct kfd_process *p,
{
struct kfd_criu_bo_bucket *bo_buckets = NULL;
struct kfd_criu_bo_priv_data *bo_privs = NULL;
const bool criu_resume = true;
bool flush_tlbs = false;
int ret = 0, j = 0;
int ret = 0;
uint32_t i = 0;
if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
@ -2141,178 +2274,13 @@ static int criu_restore_bos(struct kfd_process *p,
/* Create and map new BOs */
for (; i < args->num_bos; i++) {
struct kfd_criu_bo_bucket *bo_bucket;
struct kfd_criu_bo_priv_data *bo_priv;
struct kfd_dev *dev;
struct kfd_process_device *pdd;
struct kgd_mem *kgd_mem;
void *mem;
u64 offset;
int idr_handle;
bo_bucket = &bo_buckets[i];
bo_priv = &bo_privs[i];
pr_debug("kfd restore ioctl - bo_bucket[%d]:\n", i);
pr_debug("size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
"gpu_id = 0x%x alloc_flags = 0x%x\n"
"idr_handle = 0x%x\n",
bo_bucket->size,
bo_bucket->addr,
bo_bucket->offset,
bo_bucket->gpu_id,
bo_bucket->alloc_flags,
bo_priv->idr_handle);
pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
if (!pdd) {
pr_err("Failed to get pdd\n");
ret = -ENODEV;
goto exit;
}
dev = pdd->dev;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
pr_debug("restore ioctl: KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL\n");
if (bo_bucket->size != kfd_doorbell_process_slice(dev)) {
ret = -EINVAL;
goto exit;
}
offset = kfd_get_process_doorbells(pdd);
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need remapped bus address */
pr_debug("restore ioctl :KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP\n");
if (bo_bucket->size != PAGE_SIZE) {
pr_err("Invalid page size\n");
ret = -EINVAL;
goto exit;
}
offset = dev->adev->rmmio_remap.bus_addr;
if (!offset) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
ret = -ENOMEM;
goto exit;
}
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
offset = bo_priv->user_addr;
}
/* Create the BO */
ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(dev->adev,
bo_bucket->addr,
bo_bucket->size,
pdd->drm_priv,
(struct kgd_mem **) &mem,
&offset,
bo_bucket->alloc_flags,
criu_resume);
ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
if (ret) {
pr_err("Could not create the BO\n");
ret = -ENOMEM;
pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
goto exit;
}
pr_debug("New BO created: size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n",
bo_bucket->size, bo_bucket->addr, offset);
/* Restore previous IDR handle */
pr_debug("Restoring old IDR handle for the BO");
idr_handle = idr_alloc(&pdd->alloc_idr, mem,
bo_priv->idr_handle,
bo_priv->idr_handle + 1, GFP_KERNEL);
if (idr_handle < 0) {
pr_err("Could not allocate idr\n");
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
(struct kgd_mem *)mem,
pdd->drm_priv, NULL);
ret = -ENOMEM;
goto exit;
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL |
KFD_MMAP_GPU_ID(pdd->dev->id);
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO |
KFD_MMAP_GPU_ID(pdd->dev->id);
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
bo_bucket->restored_offset = offset;
pr_debug("updating offset for GTT\n");
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
pr_debug("updating offset for VRAM\n");
}
/* now map these BOs to GPU/s */
for (j = 0; j < p->n_pdds; j++) {
struct kfd_dev *peer;
struct kfd_process_device *peer_pdd;
bool table_freed = false;
if (!bo_priv->mapped_gpuids[j])
break;
peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
if (!peer_pdd) {
ret = -EINVAL;
goto exit;
}
peer = peer_pdd->dev;
peer_pdd = kfd_bind_process_to_device(peer, p);
if (IS_ERR(peer_pdd)) {
ret = PTR_ERR(peer_pdd);
goto exit;
}
pr_debug("map mem in restore ioctl -> 0x%llx\n",
((struct kgd_mem *)mem)->va);
ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev,
(struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed);
if (ret) {
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
goto exit;
}
if (table_freed)
flush_tlbs = true;
}
ret = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
(struct kgd_mem *) mem, true);
if (ret) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto exit;
}
pr_debug("map memory was successful for the BO\n");
/* create the dmabuf object and export the bo */
kgd_mem = (struct kgd_mem *)mem;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base,
DRM_RDWR,
&bo_bucket->dmabuf_fd);
if (ret)
goto exit;
}
} /* done */
if (flush_tlbs) {
/* Flush TLBs after waiting for the page table updates to complete */
for (j = 0; j < p->n_pdds; j++) {
struct kfd_dev *peer;
struct kfd_process_device *pdd = p->pdds[j];
struct kfd_process_device *peer_pdd;
peer = kfd_device_by_id(pdd->dev->id);
if (WARN_ON_ONCE(!peer))
continue;
peer_pdd = kfd_get_process_device_data(peer, p);
if (WARN_ON_ONCE(!peer_pdd))
continue;
kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
}
/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
ret = copy_to_user((void __user *)args->bos,
bo_buckets,
@ -2322,7 +2290,8 @@ static int criu_restore_bos(struct kfd_process *p,
exit:
while (ret && i--) {
if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
if (bo_buckets[i].alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
close_fd(bo_buckets[i].dmabuf_fd);
}
kvfree(bo_buckets);


@ -638,6 +638,22 @@ out_oom:
return r;
}
/**
* svm_migrate_vma_to_ram - migrate range inside one vma from device to system
*
* @adev: amdgpu device to migrate from
* @prange: svm range structure
* @vma: vm_area_struct that range [start, end] belongs to
* @start: range start virtual address in pages
* @end: range end virtual address in pages
*
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - success with all pages migrated
* negative values - indicate error
* positive values - partial migration, number of pages not migrated
*/
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
@ -709,8 +725,6 @@ out:
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
return upages;
}
return r ? r : upages;
}
@ -720,7 +734,7 @@ out:
* @prange: range structure
* @mm: process mm, use current->mm if NULL
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - OK, otherwise error code
@ -759,13 +773,16 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
unsigned long next;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start)
if (!vma || addr < vma->vm_start) {
pr_debug("failed to find vma for prange %p\n", prange);
r = -EFAULT;
break;
}
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
upages += r;
@ -773,7 +790,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
addr = next;
}
if (!upages) {
if (r >= 0 && !upages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
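
The new kernel-doc above spells out a three-way return for svm_migrate_vma_to_ram(): negative errno on failure, 0 when every page migrated, a positive count of pages left behind otherwise — and the caller frees the VRAM node only when r >= 0 && !upages. A standalone sketch of a caller consuming that convention; migrate_chunk() is a hypothetical stand-in:

#include <stdio.h>

/* Hypothetical stand-in with the same return convention as
 * svm_migrate_vma_to_ram(): negative errno on failure, 0 when every page
 * migrated, positive count of pages left behind otherwise. */
static long migrate_chunk(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
	return 2;	/* pretend two pages stayed resident in VRAM */
}

int main(void)
{
	unsigned long addr = 0, end = 16, next;
	long r = 0, upages = 0;

	while (addr < end) {
		next = addr + 4;	/* one VMA-sized chunk at a time */
		r = migrate_chunk(addr, next);
		if (r < 0)
			break;		/* hard error: stop and report */
		upages += r;		/* accumulate pages still in VRAM */
		addr = next;
	}

	/* Mirror the diff: tear down the VRAM node only when no error
	 * occurred AND every page actually left the device. */
	if (r >= 0 && !upages)
		printf("all pages migrated; free VRAM node\n");
	else
		printf("r=%ld upages=%ld; keep VRAM node\n", r, upages);
	return r < 0 ? 1 : 0;
}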


@ -3155,6 +3155,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
struct svm_range_bo *svm_bo;
struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
if (!svm_bo_ref_unless_zero(svm_bo))
@ -3170,7 +3171,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
while (!list_empty(&svm_bo->range_list)) {
while (!list_empty(&svm_bo->range_list) && !r) {
struct svm_range *prange =
list_first_entry(&svm_bo->range_list,
struct svm_range, svm_bo_list);
@ -3184,15 +3185,18 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
svm_migrate_vram_to_ram(prange,
r = svm_migrate_vram_to_ram(prange,
svm_bo->eviction_fence->mm);
} while (prange->actual_loc && --retries);
WARN(prange->actual_loc, "Migration failed during eviction");
} while (!r && prange->actual_loc && --retries);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
mutex_unlock(&prange->lock);
if (!r && prange->actual_loc)
pr_info_once("Migration failed during eviction");
if (!prange->actual_loc) {
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
mutex_unlock(&prange->lock);
}
mutex_unlock(&prange->migrate_mutex);
spin_lock(&svm_bo->list_lock);
@ -3201,10 +3205,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mmap_read_unlock(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
/* This is the last reference to svm_bo, after svm_range_vram_node_free
* has been called in svm_migrate_vram_to_ram
*/
WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
svm_range_bo_unref(svm_bo);
}


@ -1481,6 +1481,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
default:
@ -2633,10 +2634,13 @@ static int dm_resume(void *handle)
* before the 0 streams commit.
*
* DC expects that link encoder assignments are *not* valid
* when committing a state, so as a workaround it needs to be
* cleared here.
* when committing a state, so as a workaround we can copy
* off of the current state.
*
* We lose the previous assignments, but we had already
* commit 0 streams anyway.
*/
link_enc_cfg_init(dm->dc, dc_state);
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
if (dc_enable_dmub_notifications(adev->dm.dc))
amdgpu_dm_outbox_init(adev);
@ -6356,7 +6360,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
}
}
aconnector->freesync_vid_base = *m_pref;
drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
return m_pref;
}
@ -6469,8 +6473,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
recalculate_timing = is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
saved_mode = mode;
mode = *freesync_mode;
drm_mode_copy(&saved_mode, &mode);
drm_mode_copy(&mode, freesync_mode);
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@ -10177,27 +10181,27 @@ static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state)
{
struct drm_display_mode old_mode, new_mode;
const struct drm_display_mode *old_mode, *new_mode;
if (!old_crtc_state || !new_crtc_state)
return false;
old_mode = old_crtc_state->mode;
new_mode = new_crtc_state->mode;
old_mode = &old_crtc_state->mode;
new_mode = &new_crtc_state->mode;
if (old_mode.clock == new_mode.clock &&
old_mode.hdisplay == new_mode.hdisplay &&
old_mode.vdisplay == new_mode.vdisplay &&
old_mode.htotal == new_mode.htotal &&
old_mode.vtotal != new_mode.vtotal &&
old_mode.hsync_start == new_mode.hsync_start &&
old_mode.vsync_start != new_mode.vsync_start &&
old_mode.hsync_end == new_mode.hsync_end &&
old_mode.vsync_end != new_mode.vsync_end &&
old_mode.hskew == new_mode.hskew &&
old_mode.vscan == new_mode.vscan &&
(old_mode.vsync_end - old_mode.vsync_start) ==
(new_mode.vsync_end - new_mode.vsync_start))
if (old_mode->clock == new_mode->clock &&
old_mode->hdisplay == new_mode->hdisplay &&
old_mode->vdisplay == new_mode->vdisplay &&
old_mode->htotal == new_mode->htotal &&
old_mode->vtotal != new_mode->vtotal &&
old_mode->hsync_start == new_mode->hsync_start &&
old_mode->vsync_start != new_mode->vsync_start &&
old_mode->hsync_end == new_mode->hsync_end &&
old_mode->vsync_end != new_mode->vsync_end &&
old_mode->hskew == new_mode->hskew &&
old_mode->vscan == new_mode->vscan &&
(old_mode->vsync_end - old_mode->vsync_start) ==
(new_mode->vsync_end - new_mode->vsync_start))
return true;
return false;


@ -243,6 +243,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
@ -302,6 +303,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!valid_input) {
kfree(wr_buf);
DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
mutex_lock(&adev->dm.dc_lock);
dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
mutex_unlock(&adev->dm.dc_lock);
return size;
}
@ -313,7 +317,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
mutex_lock(&adev->dm.dc_lock);
dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false);
mutex_unlock(&adev->dm.dc_lock);
kfree(wr_buf);
return size;
@ -2883,7 +2889,9 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
kfree(wr_buf);
DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
prefer_link_settings.use_link_rate_set = false;
mutex_lock(&adev->dm.dc_lock);
dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
mutex_unlock(&adev->dm.dc_lock);
return size;
}


@ -456,7 +456,7 @@ static enum bp_result transmitter_control_v2(
if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
(CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
/* on INIT this bit should be set according to the
* phisycal connector
* physical connector
* Bit0: dual link connector flag
* =0 connector is single link connector
* =1 connector is dual link connector
@ -468,7 +468,7 @@ static enum bp_result transmitter_control_v2(
cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
break;
case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
/* votage swing and pre-emphsis */
/* voltage swing and pre-emphsis */
params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
break;
@ -2120,7 +2120,7 @@ static enum bp_result program_clock_v5(
memset(&params, 0, sizeof(params));
if (!bp->cmd_helper->clock_source_id_to_atom(
bp_params->pll_id, &atom_pll_id)) {
BREAK_TO_DEBUGGER(); /* Invalid Inpute!! */
BREAK_TO_DEBUGGER(); /* Invalid Input!! */
return BP_RESULT_BADINPUT;
}


@ -25,30 +25,23 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dcn30_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
/*we don't have clk folder yet*/
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn30_smu11_driver_if.h"
#undef FN
#define FN(reg_name, field_name) \
@ -83,7 +76,7 @@ static const struct clk_mgr_mask clk_mgr_mask = {
/* Query SMU for all clock states for a particular clock */
static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, unsigned int *num_levels)
static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, uint32_t clk, unsigned int *entry_0, unsigned int *num_levels)
{
unsigned int i;
char *entry_i = (char *)entry_0;


@ -29,6 +29,7 @@
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dalsmc.h"
#include "dcn30_smu11_driver_if.h"
#define mmDAL_MSG_REG 0x1628A
#define mmDAL_ARG_REG 0x16273
@ -197,7 +198,7 @@ void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
}
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
@ -215,7 +216,7 @@ unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PP
}
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
@ -246,7 +247,7 @@ unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PP
*
* Returns 0 on failure
*/
unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level)
unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
{
uint32_t response = 0;
@ -264,7 +265,7 @@ unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, P
}
/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk)
unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
{
uint32_t response = 0;


@ -28,65 +28,6 @@
#include "core_types.h"
#define SMU11_DRIVER_IF_VERSION 0x1F
typedef enum {
PPCLK_GFXCLK = 0,
PPCLK_SOCCLK,
PPCLK_UCLK,
PPCLK_FCLK,
PPCLK_DCLK_0,
PPCLK_VCLK_0,
PPCLK_DCLK_1,
PPCLK_VCLK_1,
PPCLK_DCEFCLK,
PPCLK_DISPCLK,
PPCLK_PIXCLK,
PPCLK_PHYCLK,
PPCLK_DTBCLK,
PPCLK_COUNT,
} PPCLK_e;
typedef struct {
uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MinUclk;
uint16_t MaxUclk;
uint8_t WmSetting;
uint8_t Flags;
uint8_t Padding[2];
} WatermarkRowGeneric_t;
#define NUM_WM_RANGES 4
typedef enum {
WM_SOCCLK = 0,
WM_DCEFCLK,
WM_COUNT,
} WM_CLOCK_e;
typedef enum {
WATERMARKS_CLOCK_RANGE = 0,
WATERMARKS_DUMMY_PSTATE,
WATERMARKS_MALL,
WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;
typedef struct {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
} Watermarks_t;
typedef struct {
Watermarks_t Watermarks;
uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;
#define TABLE_WATERMARKS 1
struct clk_mgr_internal;
bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input);
@ -97,10 +38,10 @@ void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint
void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level);
unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk);
unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, bool enable, uint8_t cache_timer_delay, uint8_t cache_timer_scale);


@ -0,0 +1,74 @@
// This is a stripped-down version of the smu11_driver_if.h file for the relevant DAL interfaces.
#define SMU11_DRIVER_IF_VERSION 0x40
//Only Clks that have DPM descriptors are listed here
typedef enum {
PPCLK_GFXCLK = 0,
PPCLK_SOCCLK,
PPCLK_UCLK,
PPCLK_FCLK,
PPCLK_DCLK_0,
PPCLK_VCLK_0,
PPCLK_DCLK_1,
PPCLK_VCLK_1,
PPCLK_DCEFCLK,
PPCLK_DISPCLK,
PPCLK_PIXCLK,
PPCLK_PHYCLK,
PPCLK_DTBCLK,
PPCLK_COUNT,
} PPCLK_e;
typedef struct {
uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MinUclk;
uint16_t MaxUclk;
uint8_t WmSetting;
uint8_t Flags;
uint8_t Padding[2];
} WatermarkRowGeneric_t;
#define NUM_WM_RANGES 4
typedef enum {
WM_SOCCLK = 0,
WM_DCEFCLK,
WM_COUNT,
} WM_CLOCK_e;
typedef enum {
WATERMARKS_CLOCK_RANGE = 0,
WATERMARKS_DUMMY_PSTATE,
WATERMARKS_MALL,
WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;
typedef struct {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
} Watermarks_t;
typedef struct {
Watermarks_t Watermarks;
uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;
// Table types
#define TABLE_PPTABLE 0
#define TABLE_WATERMARKS 1
#define TABLE_AVFS_PSM_DEBUG 2
#define TABLE_AVFS_FUSE_OVERRIDE 3
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_PACE 10
#define TABLE_ECCINFO 11
#define TABLE_COUNT 12


@ -131,6 +131,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
idle_info.idle_info.s0i2_rdy = 1;
dcn315_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@ -586,8 +587,10 @@ void dcn315_clk_mgr_construct(
}
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn315_bw_params;


@ -312,3 +312,27 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
{
int dprefclk_get_mhz = -1;
if (clk_mgr->smu_present) {
dprefclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetDprefclkFreq,
0);
}
return (dprefclk_get_mhz * 1000);
}
int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetFclkFrequency,
0);
}
return (fclk_get_mhz * 1000);
}


@ -123,4 +123,6 @@ void dcn315_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_315_SMU_H_ */


@ -172,8 +172,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
// Todo DCN316 set this to 1 if any no issue
idle_info.idle_info.s0i2_rdy = 0;
idle_info.idle_info.s0i2_rdy = 1;
dcn316_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@ -684,8 +683,11 @@ void dcn316_clk_mgr_construct(
}
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz =
dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn316_bw_params;


@ -310,3 +310,29 @@ void dcn316_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
VBIOSSMC_MSG_SetDtbclkFreq,
enable);
}
int dcn316_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
{
int dprefclk_get_mhz = -1;
if (clk_mgr->smu_present) {
dprefclk_get_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetDprefclkFreq,
0);
}
return (dprefclk_get_mhz * 1000);
}
int dcn316_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn316_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_GetFclkFrequency,
0);
}
return (fclk_get_mhz * 1000);
}


@ -133,5 +133,7 @@ void dcn316_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn316_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
void dcn316_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
void dcn316_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
int dcn316_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
int dcn316_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_316_SMU_H_ */


@ -1082,7 +1082,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
break;
}
}
if (!should_disable && pipe_split_change)
if (!should_disable && pipe_split_change &&
dc->current_state->stream_count != context->stream_count)
should_disable = true;
if (should_disable && old_stream) {
@ -1690,6 +1691,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct pipe_ctx *pipe;
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
@ -1808,10 +1810,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
dc_release_state(dc->current_state);
old_state = dc->current_state;
dc->current_state = context;
dc_release_state(old_state);
dc_retain_state(dc->current_state);
return result;
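
The reordering above is the classic swap-then-release pattern for refcounted objects: publish the new state in dc->current_state before dropping the reference on the old one, so there is no window in which current_state points at freed memory. A minimal refcounted sketch of the pattern — state_create()/state_release() are simplified stand-ins for dc's retain/release helpers:

#include <stdio.h>
#include <stdlib.h>

/* Minimal refcounted object standing in for dc_state. */
struct state {
	int refs;
	const char *name;
};

static struct state *state_create(const char *name)
{
	struct state *s = malloc(sizeof(*s));
	if (!s)
		abort();
	s->refs = 1;
	s->name = name;
	return s;
}

static void state_release(struct state *s)
{
	if (--s->refs == 0) {
		printf("freeing %s\n", s->name);
		free(s);
	}
}

int main(void)
{
	struct state *cur = state_create("old");
	struct state *context = state_create("new");

	/* Swap first, release second: cur never dangles. Releasing
	 * before the swap would leave a window where cur points at a
	 * freed object, which is what the hunk above fixes. */
	struct state *old = cur;
	cur = context;
	state_release(old);

	printf("current is %s (refs=%d)\n", cur->name, cur->refs);
	state_release(cur);
	return 0;
}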
@ -3360,6 +3363,19 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address)
{
if (dc->res_pool->oem_device)
return dce_i2c_oem_device_present(
dc->res_pool,
dc->res_pool->oem_device,
slave_address);
return false;
}
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,


@ -50,6 +50,7 @@
#include "inc/hw/panel_cntl.h"
#include "inc/link_enc_cfg.h"
#include "inc/link_dpcd.h"
#include "link/link_dp_trace.h"
#include "dc/dcn30/dcn30_vpg.h"
@ -730,6 +731,7 @@ static bool detect_dp(struct dc_link *link,
sink_caps,
audio_support);
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
}
@ -1181,6 +1183,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
case EDID_BAD_CHECKSUM:
DC_LOG_ERROR("EDID checksum invalid.\n");
break;
case EDID_PARTIAL_VALID:
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
/*
@ -1255,6 +1260,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@ -1307,6 +1315,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->dongle_max_pix_clk = 0;
dc_link_clear_dprx_states(link);
dp_trace_reset(link);
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",


@ -493,6 +493,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
sink_cap->max_hdmi_pixel_clock =
max_tmds_clk * 1000;
}
sink_cap->is_dongle_type_one = false;
} else {
if (is_valid_hdmi_signature == true) {
@ -510,6 +511,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
sink_cap->max_hdmi_pixel_clock / 1000);
}
sink_cap->is_dongle_type_one = true;
}
return;


@ -39,6 +39,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "inc/dc_link_dpia.h"
#include "inc/link_enc_cfg.h"
#include "link/link_dp_trace.h"
/*Travis*/
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
@ -347,29 +348,6 @@ static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
sizeof(vendor_lttpr_write_data));
}
static void vendor_specific_lttpr_wa_one_end(
struct dc_link *link,
uint8_t retry_count)
{
const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0};
const uint8_t offset = dp_convert_to_count(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint32_t vendor_lttpr_write_address = 0xF004F;
if (!retry_count) {
if (offset != 0xFF)
vendor_lttpr_write_address +=
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
/* W/A for certain LTTPR to reset their lane settings, part two of two */
core_link_write_dpcd(
link,
vendor_lttpr_write_address,
&vendor_lttpr_write_data[0],
sizeof(vendor_lttpr_write_data));
}
}
static void vendor_specific_lttpr_wa_one_two(
struct dc_link *link,
const uint8_t rate)
@ -396,9 +374,9 @@ static void vendor_specific_lttpr_wa_one_two(
}
}
static void vendor_specific_lttpr_wa_three(
static void dp_fixed_vs_pe_read_lane_adjust(
struct dc_link *link,
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX])
union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
{
const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
@ -440,23 +418,8 @@ static void vendor_specific_lttpr_wa_three(
1);
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3;
dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3;
}
}
static void vendor_specific_lttpr_wa_three_dpcd(
struct dc_link *link,
union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
{
union lane_adjust lane_adjust[LANE_COUNT_DP_MAX];
uint8_t lane = 0;
vendor_specific_lttpr_wa_three(link, lane_adjust);
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE;
dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE;
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3;
}
}
@ -1021,6 +984,14 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
(uint8_t *)(dpcd_buf),
sizeof(dpcd_buf));
if (status != DC_OK) {
DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X,"
" keep current lane status and lane adjust unchanged",
__func__,
lane01_status_address);
return status;
}
for (lane = 0; lane <
(uint32_t)(link_training_setting->link_settings.lane_count);
lane++) {
@ -1161,6 +1132,9 @@ static bool perform_post_lt_adj_req_sequence(
uint32_t adj_req_timer;
bool req_drv_setting_changed;
uint32_t lane;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
req_drv_setting_changed = false;
for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
@ -1172,11 +1146,6 @@ static bool perform_post_lt_adj_req_sequence(
adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
adj_req_timer++) {
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated
dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
@ -1403,6 +1372,10 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
memset(&dpcd_lane_status_updated, '\0',
sizeof(dpcd_lane_status_updated));
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
@ -1414,9 +1387,6 @@ static enum link_training_result perform_clock_recovery_sequence(
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
memset(&dpcd_lane_status_updated, '\0',
sizeof(dpcd_lane_status_updated));
/* 1. call HWSS to set lane settings*/
dp_set_hw_lane_settings(
@ -1463,13 +1433,6 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_lane_adjust,
offset);
if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
vendor_specific_lttpr_wa_one_end(link, retry_count);
vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust);
}
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status))
return LINK_TRAINING_SUCCESS;
@ -2519,12 +2482,13 @@ static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
retries_cr = 0;
retry_count = 0;
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
memset(&dpcd_lane_status_updated, '\0',
sizeof(dpcd_lane_status_updated));
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
memset(&dpcd_lane_status_updated, '\0',
sizeof(dpcd_lane_status_updated));
/* 1. call HWSS to set lane settings */
dp_set_hw_lane_settings(
@ -2821,6 +2785,10 @@ bool perform_link_training_with_retries(
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
struct dc_link_settings current_setting = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
dp_trace_commit_lt_init(link);
if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
@ -2828,6 +2796,7 @@ bool perform_link_training_with_retries(
*/
link_hwss->setup_stream_encoder(pipe_ctx);
dp_trace_set_lt_start_timestamp(link, false);
for (j = 0; j < attempts; ++j) {
DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
@ -2883,10 +2852,15 @@ bool perform_link_training_with_retries(
skip_video_pattern);
}
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
if (status == LINK_TRAINING_SUCCESS)
return true;
}
fail_count++;
dp_trace_lt_fail_count_update(link, fail_count, false);
/* latest link training still fail, skip delay and keep PHY on
*/
if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
@ -3354,6 +3328,8 @@ static bool dp_verify_link_cap(
} else {
(*fail_count)++;
}
dp_trace_lt_total_count_increment(link, true);
dp_trace_lt_result_update(link, status, true);
dp_disable_link_phy(link, &link_res, link->connector_signal);
} while (!success && decide_fallback_link_setting(link,
initial_link_settings, &cur_link_settings, status));
@ -3385,13 +3361,16 @@ bool dp_verify_link_cap_with_retries(
{
int i = 0;
bool success = false;
int fail_count = 0;
dp_trace_detect_lt_init(link);
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa)
apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
dp_trace_set_lt_start_timestamp(link, false);
for (i = 0; i < attempts; i++) {
int fail_count = 0;
enum dc_connection_type type = dc_connection_none;
memset(&link->verified_link_cap, 0,
@ -3406,6 +3385,10 @@ bool dp_verify_link_cap_with_retries(
}
msleep(10);
}
dp_trace_lt_fail_count_update(link, fail_count, true);
dp_trace_set_lt_end_timestamp(link, true);
return success;
}
@ -3495,7 +3478,8 @@ static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
}
}
static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
static enum dc_link_rate increase_link_rate(struct dc_link *link,
enum dc_link_rate link_rate)
{
switch (link_rate) {
case LINK_RATE_LOW:
@ -3507,7 +3491,15 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
case LINK_RATE_HIGH3:
return LINK_RATE_UHBR10;
case LINK_RATE_UHBR10:
return LINK_RATE_UHBR13_5;
/* Up to the DP 2.x specs, UHBR13.5 is the only link rate that may be
* unsupported by the DPRX even when a higher link rate is supported,
* so we treat it as a special case for code simplicity. When future
* specs add more link rates like this, we should consider a more
* generic solution for handling discrete link rate capabilities.
*/
return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
case LINK_RATE_UHBR13_5:
return LINK_RATE_UHBR20;
default:
@ -3516,12 +3508,17 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
}
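A condensed sketch of the promotion ladder the hunk above implements, with hypothetical enum and parameter names; per the comment, UHBR13.5 is the one optional rung:
#include <stdbool.h>
enum rate { HBR3, UHBR10, UHBR13_5, UHBR20 };
static enum rate next_rate(enum rate r, bool dprx_supports_uhbr13_5)
{
	switch (r) {
	case HBR3:
		return UHBR10;
	case UHBR10:
		/* skip the optional rung when the DPRX lacks it */
		return dprx_supports_uhbr13_5 ? UHBR13_5 : UHBR20;
	case UHBR13_5:
		return UHBR20;
	default:
		return r;
	}
}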
static bool decide_fallback_link_setting_max_bw_policy(
struct dc_link *link,
const struct dc_link_settings *max,
struct dc_link_settings *cur)
struct dc_link_settings *cur,
enum link_training_result training_result)
{
uint8_t cur_idx = 0, next_idx;
bool found = false;
if (training_result == LINK_TRAINING_ABORT)
return false;
while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
/* find current index */
if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
@ -3534,11 +3531,22 @@ static bool decide_fallback_link_setting_max_bw_policy(
while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
/* find next index */
if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count &&
dp_lt_fallbacks[next_idx].link_rate <= max->link_rate)
break;
else
if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
next_idx++;
else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
/* Up to the DP 2.x specs, UHBR13.5 is the only link rate
* that may be unsupported by the DPRX even when a higher
* link rate is supported, so we treat it as a special
* case for code simplicity. When future specs add more
* link rates like this, we should consider a more
* generic solution for handling discrete link rate
* capabilities.
*/
next_idx++;
else
break;
if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
@ -3567,8 +3575,8 @@ static bool decide_fallback_link_setting(
return false;
if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
link->dc->debug.force_dp2_lt_fallback_method)
return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
current_link_setting);
return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings,
current_link_setting, training_result);
switch (training_result) {
case LINK_TRAINING_CR_FAIL_LANE0:
@ -3723,7 +3731,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
current_link_setting.lane_count);
} else {
current_link_setting.link_rate =
increase_link_rate(
increase_link_rate(link,
current_link_setting.link_rate);
current_link_setting.lane_count =
initial_link_setting.lane_count;
@ -3838,7 +3846,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
/* minimize lane */
if (current_link_setting.link_rate < max_link_rate) {
current_link_setting.link_rate =
increase_link_rate(
increase_link_rate(link,
current_link_setting.link_rate);
} else {
if (current_link_setting.lane_count <
@ -3859,7 +3867,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
current_link_setting.lane_count);
} else {
current_link_setting.link_rate =
increase_link_rate(
increase_link_rate(link,
current_link_setting.link_rate);
current_link_setting.lane_count =
initial_link_setting.lane_count;
@ -4136,7 +4144,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
vendor_specific_lttpr_wa_three_dpcd(
dp_fixed_vs_pe_read_lane_adjust(
link,
link_training_settings.dpcd_lane_settings);
@ -4672,6 +4680,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
status = false;
if (out_link_loss)
*out_link_loss = true;
dp_trace_link_loss_increment(link);
}
if (link->type == dc_connection_sst_branch &&
@ -5108,6 +5118,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
sizeof(lttpr_dpcd_data));
if (status != DC_OK) {
DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
return false;
}

View File

@ -272,6 +272,13 @@ void link_enc_cfg_init(
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx)
{
memcpy(&dst_ctx->res_ctx.link_enc_cfg_ctx,
&src_ctx->res_ctx.link_enc_cfg_ctx,
sizeof(dst_ctx->res_ctx.link_enc_cfg_ctx));
}
void link_enc_cfg_link_encs_assign(
struct dc *dc,
struct dc_state *state,

View File

@ -2335,6 +2335,9 @@ void dc_resource_state_construct(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
{
if (dc->res_pool == NULL)
return false;
return dc->res_pool->res_cap->num_dsc > 0;
}

View File

@ -718,6 +718,20 @@ enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
}
}
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream)
{
int i = 0;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &stream->ctx->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
return pipe;
}
return NULL;
}
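The new helper is a plain linear scan of the current state's pipe array, returning NULL when the stream drives no pipe. A hypothetical caller-side sketch (not taken from the driver):
struct pipe_ctx *pipe = dc_stream_get_pipe_ctx(stream);
if (pipe) {
	/* stream is bound to hardware: pipe->plane_res/stream_res are live */
} else {
	/* stream is not driving any pipe in the committed state */
}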
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
DC_LOG_DC(

View File

@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.175"
#define DC_VER "3.2.177"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -1206,6 +1206,7 @@ struct dpcd_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
bool is_dongle_type_one;
/* branch device or sink device */
bool is_branch_dev;
/* Dongle's downstream count. */

View File

@ -76,6 +76,28 @@ struct link_trace {
struct time_stamp time_stamp;
};
struct dp_trace_lt_counts {
unsigned int total;
unsigned int fail;
};
struct dp_trace_lt {
struct dp_trace_lt_counts counts;
struct dp_trace_timestamps {
unsigned long long start;
unsigned long long end;
} timestamps;
enum link_training_result result;
bool is_logged;
};
struct dp_trace {
struct dp_trace_lt detect_lt_trace;
struct dp_trace_lt commit_lt_trace;
unsigned int link_loss_count;
bool is_initialized;
};
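Because detection-time and commit-time link training get separate counters and timestamps, every trace helper takes an in_detection selector. A one-line sketch (an assumption, building on the structs above) of how that flag would pick the sub-trace:
static struct dp_trace_lt *get_lt_trace(struct dp_trace *trace,
		bool in_detection)
{
	return in_detection ? &trace->detect_lt_trace
			    : &trace->commit_lt_trace;
}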
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
@ -121,6 +143,8 @@ struct dc_link {
bool edp_sink_present;
struct dp_trace dp_trace;
/* caps is the same as reported_link_cap. Link training uses
* reported_link_cap. Will clean up. TODO
*/
@ -442,6 +466,11 @@ const struct dc_link_settings *dc_link_get_link_cap(
void dc_link_overwrite_extended_receiver_cap(
struct dc_link *link);
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address
);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
@ -470,4 +499,16 @@ void dc_link_clear_dprx_states(struct dc_link *link);
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
struct graphics_object_id link_id,
struct gpio_service *gpio_service);
void dp_trace_reset(struct dc_link *link);
bool dc_dp_trace_is_initialized(struct dc_link *link);
unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection);
void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged);
bool dc_dp_trace_is_logged(struct dc_link *link,
bool in_detection);
struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
#endif /* DC_LINK_H_ */

View File

@ -524,4 +524,6 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *v_pos,
unsigned int *nom_v_pos);
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
#endif /* DC_STREAM_H_ */

View File

@ -138,6 +138,7 @@ enum dc_edid_status {
EDID_BAD_CHECKSUM,
EDID_THE_SAME,
EDID_FALL_BACK,
EDID_PARTIAL_VALID,
};
enum act_return_status {

View File

@ -413,7 +413,8 @@ static bool acquire(
return false;
if (!acquire_engine(engine)) {
dal_ddc_close(ddc);
engine->ddc = ddc;
release_engine(engine);
return false;
}
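The fix replaces a stray assignment with the missing rollback on the failure path. The general rule, sketched with stand-in types: every resource taken before a failed step must be undone before returning:
#include <stdbool.h>
struct res;
bool take(struct res *r);
void put(struct res *r);
static bool acquire_both(struct res *a, struct res *b)
{
	if (!take(a))
		return false;
	if (!take(b)) {
		put(a);		/* roll back the earlier acquisition */
		return false;
	}
	return true;
}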

View File

@ -450,6 +450,8 @@ void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
}

View File

@ -971,6 +971,98 @@ static bool dce112_program_pix_clk(
return true;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool dcn31_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
struct pll_settings *pll_settings)
{
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
const struct pixel_rate_range_table_entry *e =
look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
// For these signal types the driver programs DP_DTO directly, without calling the VBIOS command table
if (dc_is_dp_signal(pix_clk_params->signal_type)) {
if (e) {
/* Set DTO values: phase = target clock, modulo = reference clock*/
REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor);
} else {
/* Set DTO values: phase = target clock, modulo = reference clock*/
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
} else {
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_100hz = 7000000;
unsigned clock_100hz = pll_settings->actual_pix_clk_100hz;
/* Set DTO values: phase = target clock, modulo = reference clock */
REG_WRITE(PHASE[inst], clock_100hz);
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
return true;
}
/* ATOMBIOS expects the pixel rate adjusted by the deep color ratio */
bp_pc_params.controller_id = pix_clk_params->controller_id;
bp_pc_params.pll_id = clock_source->id;
bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz;
bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
bp_pc_params.signal_type = pix_clk_params->signal_type;
// Make sure we send the correct color depth to DMUB for HDMI
if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_888:
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
break;
case COLOR_DEPTH_101010:
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_30;
break;
case COLOR_DEPTH_121212:
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_36;
break;
case COLOR_DEPTH_161616:
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_48;
break;
default:
bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
break;
}
bp_pc_params.color_depth = bp_pc_colour_depth;
}
if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
pll_settings->use_external_clk;
bp_pc_params.flags.SET_XTALIN_REF_SRC =
!pll_settings->use_external_clk;
if (pix_clk_params->flags.SUPPORT_YCBCR420) {
bp_pc_params.flags.SUPPORT_YUV_420 = 1;
}
}
if (clk_src->bios->funcs->set_pixel_clock(
clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
return false;
/* Resync deep color DTO */
if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
dce112_program_pixel_clk_resync(clk_src,
pix_clk_params->signal_type,
pix_clk_params->color_depth,
pix_clk_params->flags.SUPPORT_YCBCR420);
}
return true;
}
#endif
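With phase = target rate and modulo = reference rate, a DTO that generates ref * phase / modulo reproduces the target exactly; that is the assumed model behind the PHASE/MODULO writes above. A tiny arithmetic sketch:
#include <stdio.h>
/* Assumed DTO model: out = ref * phase / modulo. */
static unsigned long long dto_out_khz(unsigned long long ref_khz,
				      unsigned long long phase,
				      unsigned long long modulo)
{
	return ref_khz * phase / modulo;
}
int main(void)
{
	/* phase = target, modulo = reference -> output equals the target */
	printf("%llu kHz\n", dto_out_khz(600000, 594000, 600000)); /* 594000 */
	return 0;
}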
static bool dce110_clock_source_power_down(
struct clock_source *clk_src)
@ -1205,6 +1297,13 @@ static const struct clock_source_funcs dcn3_clk_src_funcs = {
.get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
static const struct clock_source_funcs dcn31_clk_src_funcs = {
.cs_power_down = dce110_clock_source_power_down,
.program_pix_clk = dcn31_program_pix_clk,
.get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
#endif
/*****************************************/
/* Constructor */
@ -1609,6 +1708,24 @@ bool dcn3_clk_src_construct(
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn31_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask)
{
bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask);
clk_src->base.funcs = &dcn31_clk_src_funcs;
return ret;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dcn301_clk_src_construct(
struct dce110_clk_src *clk_src,

View File

@ -292,6 +292,15 @@ bool dcn301_clk_src_construct(
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
bool dcn31_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
#endif
/* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */

View File

@ -25,6 +25,32 @@
#include "dce_i2c.h"
#include "reg_helper.h"
bool dce_i2c_oem_device_present(
struct resource_pool *pool,
struct ddc_service *ddc,
size_t slave_address
)
{
struct dc *dc = ddc->ctx->dc;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct graphics_object_id id = {0};
struct graphics_object_i2c_info i2c_info;
if (!dc->ctx->dc_bios->fw_info.oem_i2c_present)
return false;
id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
id.enum_id = 0;
id.type = OBJECT_TYPE_GENERIC;
if (dcb->funcs->get_i2c_info(dcb, id, &i2c_info) != BP_RESULT_OK)
return false;
if (i2c_info.i2c_slave_address != slave_address)
return false;
return true;
}
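A hypothetical caller-side probe through the DC-level wrapper declared in dc_link.h; the slave address is made up for illustration:
if (dc_is_oem_i2c_device_present(dc, 0x40 /* hypothetical address */)) {
	/* the VBIOS-described OEM I2C device answers at this address;
	 * OEM transactions can be routed through the pool's oem_device */
}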
bool dce_i2c_submit_command(
struct resource_pool *pool,
struct ddc *ddc,

View File

@ -30,6 +30,12 @@
#include "dce_i2c_hw.h"
#include "dce_i2c_sw.h"
bool dce_i2c_oem_device_present(
struct resource_pool *pool,
struct ddc_service *ddc,
size_t slave_address
);
bool dce_i2c_submit_command(
struct resource_pool *pool,
struct ddc *ddc,

View File

@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
.program_watermarks = hubbub1_program_watermarks,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
};
void hubbub1_construct(struct hubbub *hubbub,

View File

@ -1311,6 +1311,20 @@ void hubp1_set_flip_int(struct hubp *hubp)
return;
}
/**
* hubp1_wait_pipe_read_start - wait for the HUBP return path to start reading.
*
* @hubp: hubp struct reference.
*/
void hubp1_wait_pipe_read_start(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_WAIT(HUBPRET_READ_LINE_STATUS,
PIPE_READ_VBLANK, 0,
1, 1000);
}
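REG_WAIT above polls PIPE_READ_VBLANK until it reads 0, with what appear to be a delay unit and a retry bound as the last two arguments (an assumption about the macro's convention). Reduced to plain C:
#include <stdbool.h>
#include <unistd.h>
/* Bounded poll; read_field stands in for the register-field read. */
static bool poll_until(unsigned (*read_field)(void), unsigned want,
		       unsigned delay_us, unsigned max_tries)
{
	unsigned i;
	for (i = 0; i < max_tries; i++) {
		if (read_field() == want)
			return true;
		usleep(delay_us);
	}
	return false;	/* timed out */
}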
void hubp1_init(struct hubp *hubp)
{
//do nothing
@ -1345,6 +1359,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_in_blank = hubp1_in_blank,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_wait_pipe_read_start = hubp1_wait_pipe_read_start,
};
/*****************************************/

View File

@ -76,6 +76,7 @@
SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\
SRI(HUBPRET_CONTROL, HUBPRET, id),\
SRI(HUBPRET_READ_LINE_STATUS, HUBPRET, id),\
SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
SRI(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id),\
@ -186,6 +187,7 @@
uint32_t DCSURF_SURFACE_CONTROL; \
uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \
uint32_t HUBPRET_CONTROL; \
uint32_t HUBPRET_READ_LINE_STATUS; \
uint32_t DCN_EXPANSION_MODE; \
uint32_t DCHUBP_REQ_SIZE_CONFIG; \
uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \
@ -338,6 +340,7 @@
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_STATUS, PIPE_READ_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
@ -538,6 +541,7 @@
type DET_BUF_PLANE1_BASE_ADDRESS;\
type CROSSBAR_SRC_CB_B;\
type CROSSBAR_SRC_CR_R;\
type PIPE_READ_VBLANK;\
type DRQ_EXPANSION_MODE;\
type PRQ_EXPANSION_MODE;\
type MRQ_EXPANSION_MODE;\

View File

@ -1112,9 +1112,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
static bool should_log_hw_state; /* prevent hw state log by default */
if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
if (!hubbub->funcs->verify_allow_pstate_change_high)
return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
int i = 0;
if (should_log_hw_state)
@ -1123,8 +1127,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
/*check again*/
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
BREAK_TO_DEBUGGER();
}
}
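The hunk swaps a hard call to hubbub1_verify_allow_pstate_change_high() for the per-ASIC hook, and the new NULL check lets generations without an implementation skip verification entirely. The shape of that indirection, with simplified stand-in types:
#include <stdbool.h>
struct hubbub;
struct hubbub_funcs {
	bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub);
};
struct hubbub { const struct hubbub_funcs *funcs; };
static bool verify_pstate(struct hubbub *hubbub)
{
	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return true;	/* no hook on this ASIC: nothing to verify */
	return hubbub->funcs->verify_allow_pstate_change_high(hubbub);
}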

View File

@ -70,68 +70,6 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 42,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 589824,
.max_line_buffer_lines = 12,
.IsLineBufferBppFixed = 0,
.LineBufferFixedBpp = -1,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.max_num_dpp = 4,
.max_num_wb = 2,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 14,
.dppclk_delay_subtotal = 90,
.dispclk_delay_subtotal = 42,
.dcfclk_cstate_latency = 10,
.max_inter_dcn_tile_repeaters = 8,
.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
.bug_forcing_LC_req_same_size_fixed = 0,
};
const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.urgent_latency_us = 4.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 80.0,
.max_request_size_bytes = 256,
.downspread_percent = 0.5,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 2,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 17.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
};
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
@ -925,6 +863,21 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->wa.DEGVIDCN10_253 = true;
hws->wa.false_optc_underflow = true;
hws->wa.DEGVIDCN10_254 = true;
if ((ctx->asic_id.chip_family == FAMILY_RV) &&
ASICREV_IS_RAVEN2(ctx->asic_id.hw_internal_rev))
switch (ctx->asic_id.pci_revision_id) {
case PRID_POLLOCK_94:
case PRID_POLLOCK_95:
case PRID_POLLOCK_E9:
case PRID_POLLOCK_EA:
case PRID_POLLOCK_EB:
hws->wa.wait_hubpret_read_start_during_mpo_transition = true;
break;
default:
hws->wa.wait_hubpret_read_start_during_mpo_transition = false;
break;
}
}
return hws;
}

View File

@ -27,6 +27,7 @@
#define __DC_RESOURCE_DCN10_H__
#include "core_types.h"
#include "dml/dcn10/dcn10_fpu.h"
#define TO_DCN10_RES_POOL(pool)\
container_of(pool, struct dcn10_resource_pool, base)
@ -35,6 +36,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
extern struct _vcs_dpi_ip_params_st dcn1_0_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
struct dcn10_resource_pool {
struct resource_pool base;
};

View File

@ -9,31 +9,6 @@ DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o d
DCN20 += dcn20_dsc.o
ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif
ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
endif
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN20)

View File

@ -1739,6 +1739,16 @@ void dcn20_program_front_end_for_ctx(
|| pipe->stream->update_flags.raw)
&& hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by checking the pipe line read status when adding a 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
!pipe->top_pipe &&
pipe->stream &&
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
dc->current_state->stream_status[0].plane_count == 1 &&
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
}
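Condensing the gate above into a predicate (field names simplified): the wait fires only on a top pipe whose stream goes from exactly one plane to more than one in this commit, and only on parts with the workaround flag set:
#include <stdbool.h>
static bool needs_mpo_wait(bool wa_enabled, bool is_top_pipe,
			   int old_plane_count, int new_plane_count)
{
	return wa_enabled && is_top_pipe &&
	       old_plane_count == 1 && new_plane_count > 1;
}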

File diff suppressed because it is too large

View File

@ -27,6 +27,7 @@
#define __DC_RESOURCE_DCN20_H__
#include "core_types.h"
#include "dml/dcn20/dcn20_fpu.h"
#define TO_DCN20_RES_POOL(pool)\
container_of(pool, struct dcn20_resource_pool, base)
@ -35,6 +36,12 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
extern struct _vcs_dpi_ip_params_st dcn2_0_ip;
extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc;
extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc;
extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc;
struct dcn20_resource_pool {
struct resource_pool base;
};
@ -49,11 +56,7 @@ unsigned int dcn20_calc_max_scaled_time(
unsigned int time_per_pixel,
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
int dcn20_populate_dml_pipes_from_context(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
struct dc_state *state,
const struct resource_pool *pool,
@ -79,7 +82,6 @@ struct dpp *dcn20_dpp_create(
struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst);
struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst);
@ -96,11 +98,6 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states);
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@ -158,11 +155,6 @@ bool dcn20_fast_validate_bw(
int *pipe_split_from,
int *vlevel_out,
bool fast_validate);
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel);
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
@ -170,12 +162,5 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
void dcn20_patch_bounding_box(
struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
#endif /* __DC_RESOURCE_DCN20_H__ */

View File

@ -5,31 +5,6 @@
DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
dcn21_hwseq.o dcn21_link_encoder.o dcn21_dccg.o
ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif
ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
endif
endif
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN21)

View File

@ -34,6 +34,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
@ -89,230 +90,6 @@
#include "dce/dmub_psr.h"
#include "dce/dmub_abm.h"
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
.num_dsc = 3,
.rob_buffer_size_kbytes = 168,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 44,
.dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 4,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 12,
.writeback_max_vscl_taps = 12,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 4,
.max_num_dpp = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 32, //
.dppclk_delay_subtotal = 77, //
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 8,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 87, //
.dcfclk_cstate_latency = 10, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.ptoi_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 400.0,
.fabricclk_mhz = 400.0,
.dispclk_mhz = 600.0,
.dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
.dram_speed_mts = 1600.0,
},
{
.state = 1,
.dcfclk_mhz = 464.52,
.fabricclk_mhz = 800.0,
.dispclk_mhz = 654.55,
.dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
.dram_speed_mts = 1600.0,
},
{
.state = 2,
.dcfclk_mhz = 514.29,
.fabricclk_mhz = 933.0,
.dispclk_mhz = 757.89,
.dppclk_mhz = 685.71,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
.dram_speed_mts = 1866.0,
},
{
.state = 3,
.dcfclk_mhz = 576.00,
.fabricclk_mhz = 1067.0,
.dispclk_mhz = 847.06,
.dppclk_mhz = 757.89,
.phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
.dram_speed_mts = 2134.0,
},
{
.state = 4,
.dcfclk_mhz = 626.09,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 900.00,
.dppclk_mhz = 847.06,
.phyclk_mhz = 810.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
.dram_speed_mts = 2400.0,
},
{
.state = 5,
.dcfclk_mhz = 685.71,
.fabricclk_mhz = 1333.0,
.dispclk_mhz = 1028.57,
.dppclk_mhz = 960.00,
.phyclk_mhz = 810.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
.dram_speed_mts = 2666.0,
},
{
.state = 6,
.dcfclk_mhz = 757.89,
.fabricclk_mhz = 1467.0,
.dispclk_mhz = 1107.69,
.dppclk_mhz = 1028.57,
.phyclk_mhz = 810.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
.dram_speed_mts = 3200.0,
},
{
.state = 7,
.dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 8,
.dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 100.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
.num_states = 8
};
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
/* begin *********************
* macros to expand register list macros defined in the HW object header file */
@ -705,12 +482,6 @@ static const struct dcn10_stream_encoder_mask se_mask = {
static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
static int dcn21_populate_dml_pipes_from_context(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@ -1029,163 +800,13 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
static void calculate_wm_set_for_vlevel(
int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
int i;
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
dc->bb_overrides.sr_exit_time_ns / 1000.0;
}
}
if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
}
if (dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if (dc->bb_overrides.dram_clock_change_latency_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
}
}
static void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
bool fast_validate)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
ASSERT(bw_params);
patch_bounding_box(dc, &context->bw_ctx.dml.soc);
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb];
if (pipe_split_from[i] < 0) {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
} else {
pipes[pipe_cnt].clks_cfg.dppclk_mhz =
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel_req][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel_req][pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
pipe_cnt++;
}
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
context, pipes, fast_validate);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
context, pipes, fast_validate);
}
*out_pipe_cnt = pipe_cnt;
vlevel_max = bw_params->clk_table.num_entries - 1;
/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = MIN(MAX(vlevel_req, 3), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = MIN(MAX(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = MIN(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);
}
static bool dcn21_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
bool fast_validate)
bool dcn21_fast_validate_bw(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
bool fast_validate)
{
bool out = false;
int split[MAX_PIPES] = { 0 };
@ -1197,7 +818,9 @@ static bool dcn21_fast_validate_bw(
dcn20_merge_pipes_for_validate(dc, context);
DC_FP_START();
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
DC_FP_END();
*pipe_cnt_out = pipe_cnt;
@ -1287,7 +910,9 @@ static bool dcn21_fast_validate_bw(
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!hsplit_pipe) {
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
DC_FP_START();
dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
DC_FP_END();
continue;
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
@ -1329,61 +954,6 @@ validate_out:
return out;
}
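The DC_FP_START()/DC_FP_END() insertions here bracket every call that does float math, matching the series-wide move of DML computation behind an FPU-save region. A standalone illustration of the guard pattern, with no-op stand-ins for the real macros (which, on x86, save and restore kernel FPU state):
#include <stdio.h>
static void DC_FP_START(void) { /* kernel FPU save in the real driver */ }
static void DC_FP_END(void)   { /* kernel FPU restore */ }
static double scale_clk(double clk_mhz)
{
	return clk_mhz * 1.001;
}
int main(void)
{
	double d;
	DC_FP_START();		/* all float/double math stays inside */
	d = scale_clk(600.0);
	DC_FP_END();
	printf("%.3f MHz\n", d);
	return 0;
}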
static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context, bool fast_validate)
{
bool out = false;
BW_VAL_TRACE_SETUP();
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
if (pipe_cnt == 0)
goto validate_out;
if (!out)
goto validate_fail;
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
validate_fail:
DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
BW_VAL_TRACE_SKIP(fail);
out = false;
validate_out:
kfree(pipes);
BW_VAL_TRACE_FINISH();
return out;
}
/*
* Some of the functions further below use the FPU, so we need to wrap this
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
@ -1558,94 +1128,6 @@ static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx
return &dsc->base;
}
static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl)
{
struct _vcs_dpi_voltage_scaling_st low_pstate_lvl;
int i;
low_pstate_lvl.state = 1;
low_pstate_lvl.dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
low_pstate_lvl.fabricclk_mhz = clk_table->entries[0].fclk_mhz;
low_pstate_lvl.socclk_mhz = clk_table->entries[0].socclk_mhz;
low_pstate_lvl.dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
low_pstate_lvl.dispclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dispclk_mhz;
low_pstate_lvl.dppclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dppclk_mhz;
low_pstate_lvl.dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[high_voltage_lvl].dram_bw_per_chan_gbps;
low_pstate_lvl.dscclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dscclk_mhz;
low_pstate_lvl.dtbclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].dtbclk_mhz;
low_pstate_lvl.phyclk_d18_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_d18_mhz;
low_pstate_lvl.phyclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_mhz;
for (i = clk_table->num_entries; i > 1; i--)
clk_table->entries[i] = clk_table->entries[i-1];
clk_table->entries[1] = clk_table->entries[0];
clk_table->num_entries++;
return low_pstate_lvl;
}
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
unsigned int i, closest_clk_lvl = 0, k = 0;
int j;
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {
clock_limits[i] = dcn2_1_soc.clock_limits[i];
}
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
/* clk_table[1] is reserved for the min DF PState; skip it here and fill it in later. */
if (i == 1)
k++;
clock_limits[k].state = k;
clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++;
}
for (i = 0; i < clk_table->num_entries + 1; i++)
dcn2_1_soc.clock_limits[i] = clock_limits[i];
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
/* fill in min DF PState */
dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl);
/* duplicate last level */
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
}
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@ -1896,24 +1378,6 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
static int dcn21_populate_dml_pipes_from_context(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
int i;
for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
pipes[i].pipe.src.gpuvm = 1;
}
return pipe_cnt;
}
static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@ -1941,7 +1405,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = update_bw_bounding_box
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
};
static bool dcn21_resource_construct(

View File

@ -35,11 +35,22 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
extern struct _vcs_dpi_ip_params_st dcn2_1_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc;
struct dcn21_resource_pool {
struct resource_pool base;
};
struct resource_pool *dcn21_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
bool dcn21_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *pipe_split_from,
int *vlevel_out,
bool fast_validate);
#endif /* _DCN21_RESOURCE_H_ */

View File

@ -408,6 +408,6 @@ void dpp3_cm_set_gamut_remap(
gamut_mode = 1;
//follow dcn2 approach for now - using only coefficient set A
program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
program_gamut_remap(dpp, arr_reg_val, gamut_mode);
}
}

View File

@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.program_watermarks = hubbub3_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,

View File

@ -1462,7 +1462,9 @@ int dcn30_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@ -1731,7 +1733,10 @@ static bool init_soc_bounding_box(struct dc *dc,
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
DC_FP_END();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
@ -2261,7 +2266,9 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
pipe_idx++;
}
DC_FP_START();
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
if (!pstate_en)
/* Restore full p-state latency */

View File

@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.program_watermarks = hubbub3_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.hubbub_read_state = hubbub2_read_state,

View File

@ -8,32 +8,6 @@
DCN3_03 = dcn303_init.o dcn303_hwseq.o dcn303_resource.o
ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -msse
endif
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mhard-float
endif
ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -msse2
endif
endif
AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03)

View File

@ -52,140 +52,10 @@
#include "dpcs/dpcs_3_0_3_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
#include "dml/dcn303/dcn303_fpu.h"
#define DC_LOGGER_INIT(logger)
struct _vcs_dpi_ip_params_st dcn3_03_ip = {
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 2,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 2,
.pte_chunk_size_kbytes = 2, // ?
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 0, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 2,
.max_num_dpp = 2,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 1,
.odm_combine_4to1_supported = false,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 1217.0,
.dppclk_mhz = 1217.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 405.6,
},
},
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 35.5,
.sr_enter_plus_exit_time_us = 40,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 156,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3650,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
@ -1031,24 +901,18 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool)
loaded_ip->max_num_otg = pool->pipe_count;
loaded_ip->max_num_dpp = pool->pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
DC_FP_END();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = { 0 };
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(
dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_03_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_03_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_03_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
DC_FP_START();
dcn303_fpu_init_soc_bounding_box(bb_info);
DC_FP_END();
}
}
@ -1186,183 +1050,12 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes *
(dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if (bw_params->clk_table.entries[0].memclk_mhz) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
/* Update size of array since we "removed" duplicates */
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
/* Calculate optimal dcfclk for each uclk */
for (i = 0; i < num_uclk_states; i++) {
dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz)
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
/* Calculate optimal uclk for each dcfclk sta target */
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] =
bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_03_soc.num_states = num_states;
for (i = 0; i < dcn3_03_soc.num_states; i++) {
dcn3_03_soc.clock_limits[i].state = i;
dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params; always fill from dcn3_03_soc.clock_limits[0] */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
// WA: patch strobe modes to compensate for DCN303 BW issue
if (dcn3_03_soc.num_chans <= 4) {
for (i = 0; i < dcn3_03_soc.num_states; i++) {
if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
break;
if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
}
}
}
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
}
DC_FP_START();
dcn303_fpu_update_bw_bounding_box(dc, bw_params);
DC_FP_END();
}
static struct resource_funcs dcn303_res_pool_funcs = {

View File

@ -10,6 +10,9 @@
#include "core_types.h"
extern struct _vcs_dpi_ip_params_st dcn3_03_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc;
struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);

View File

@ -949,6 +949,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
/*
* Pstate latency is ~20us, so if we wait over 40us and pstate allow
* is still not asserted, we are probably stuck and going to hang.
*/
const unsigned int pstate_wait_timeout_us = 100;
const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
unsigned int debug_data = 0;
unsigned int i;
if (forced_pstate_allow) {
/* pstate allow was forced on the previous
* verify_allow_pstate_change_high call to prevent a hang, so
* disable the force here so we can check the current status
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
forced_pstate_allow = false;
}
REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);
for (i = 0; i < pstate_wait_timeout_us; i++) {
debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
/* Debug bit is specific to ASIC. */
if (debug_data & (1 << 26)) {
if (i > pstate_wait_expected_timeout_us)
DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
return true;
}
if (max_sampled_pstate_wait_us < i)
max_sampled_pstate_wait_us = i;
udelay(1);
}
/* force pstate allow to prevent system hang
* and break to debugger to investigate
*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
forced_pstate_allow = true;
DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
debug_data);
return false;
}
static const struct hubbub_funcs hubbub31_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@ -961,6 +1020,7 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_watermarks = hubbub31_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
.program_det_size = dcn31_program_det_size,
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,

View File

@ -83,7 +83,8 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL;
cmd.panel_cntl.data.bl_pwm_ref_div1 =
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
cmd.panel_cntl.data.bl_pwm_ref_div2 =
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
return 0;
@ -92,6 +93,8 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = cmd.panel_cntl.data.bl_pwm_period_cntl;
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV =
cmd.panel_cntl.data.bl_pwm_ref_div1;
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2 =
cmd.panel_cntl.data.bl_pwm_ref_div2;
return cmd.panel_cntl.data.current_backlight;
}

View File

@ -1011,7 +1011,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
.sanity_checks = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
@ -1787,7 +1787,9 @@ int dcn31_populate_dml_pipes_from_context(
struct pipe_ctx *pipe;
bool upscaled = false;
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@ -1999,7 +2001,9 @@ static void dcn31_calculate_wm_and_dlg_fp(
pipe_idx++;
}
DC_FP_START();
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
}
void dcn31_calculate_wm_and_dlg(
@ -2177,7 +2181,7 @@ static struct clock_source *dcn30_clock_source_create(
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;

View File

@ -1760,7 +1760,7 @@ static struct clock_source *dcn31_clock_source_create(
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
@ -1785,7 +1785,9 @@ static int dcn315_populate_dml_pipes_from_context(
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@ -1963,29 +1965,6 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
static struct clock_source *dcn30_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
BREAK_TO_DEBUGGER();
return NULL;
}
static bool dcn315_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@ -2091,23 +2070,23 @@ static bool dcn315_resource_construct(
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);

View File

@ -1759,7 +1759,7 @@ static struct clock_source *dcn31_clock_source_create(
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
@ -1786,7 +1786,9 @@ static int dcn316_populate_dml_pipes_from_context(
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB;
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@ -1964,29 +1966,6 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
static struct clock_source *dcn30_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
regs, &cs_shift, &cs_mask)) {
clk_src->base.dp_clk_src = dp_clk_src;
return &clk_src->base;
}
BREAK_TO_DEBUGGER();
return NULL;
}
static bool dcn316_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@ -2092,23 +2071,23 @@ static bool dcn316_resource_construct(
/* Clock Sources for Pixel Clock*/
pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL0,
&clk_src_regs[0], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
dcn31_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);

View File

@ -58,6 +58,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
ifdef CONFIG_DRM_AMD_DC_DCN
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
@ -72,6 +73,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
@ -105,6 +107,7 @@ DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o
ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
@ -113,6 +116,7 @@ DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif

View File

@ -0,0 +1,123 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn10/dcn10_resource.h"
#include "dcn10_fpu.h"
/**
* DOC: DCN10 FPU manipulation Overview
*
* The DCN architecture relies on FPU operations, which require special
* compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
* want to avoid spreading FPU access across multiple files. With this idea in
* mind, this file aims to centralize DCN10 functions that require FPU access
* in a single place. Code in this file follows the following code pattern:
*
* 1. Functions that use FPU operations should be isolated in static functions.
* 2. The FPU functions should have the noinline attribute to ensure anything
* that deals with FP registers is contained within this call.
* 3. Any function that needs to be accessed outside this file requires a
* public interface that does not use any FPU references.
* 4. Developers **must not** use DC_FP_START/END in this file, but they need
* to ensure that the caller invokes it before accessing any function
* available in this file. For this reason, public functions in this file
* must invoke dc_assert_fp_enabled();
*
* Let's expand a bit more on the idea behind this code pattern. To fully
* isolate FPU operations in a single place, we must avoid situations where
* the compiler spills FP values to registers because FP use is enabled in a
* specific C file. Note that even if we isolate all FPU functions in a single
* file and call its interface from other files, the compiler might enable the
* use of FPU before we call DC_FP_START. Nevertheless, it is the programmer's
* responsibility to invoke DC_FP_START/END in the correct place. To highlight
* situations where developers forget to use the FP protection before calling
* the DC FPU interface functions, we introduce a helper that checks whether
* the function is invoked under FP protection. If not, it will trigger a
* kernel warning.
*/
struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.rob_buffer_size_kbytes = 64,
.det_buffer_size_kbytes = 164,
.dpte_buffer_size_in_pte_reqs_luma = 42,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 589824,
.max_line_buffer_lines = 12,
.IsLineBufferBppFixed = 0,
.LineBufferFixedBpp = -1,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.max_num_dpp = 4,
.max_num_wb = 2,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 4,
.max_vscl_ratio = 4,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.10,
.min_vblank_lines = 14,
.dppclk_delay_subtotal = 90,
.dispclk_delay_subtotal = 42,
.dcfclk_cstate_latency = 10,
.max_inter_dcn_tile_repeaters = 8,
.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
.bug_forcing_LC_req_same_size_fixed = 0,
};
struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.urgent_latency_us = 4.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 80.0,
.max_request_size_bytes = 256,
.downspread_percent = 0.5,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 128,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 2,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 17.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
};

View File

@ -1,3 +1,4 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@ -19,12 +20,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
#ifndef __DCN10_FPU_H__
#define __DCN10_FPU_H__
void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
}
#endif /* __DCN20_FPU_H__ */

File diff suppressed because it is too large

View File

@ -23,6 +23,7 @@
* Authors: AMD
*
*/
#include "core_types.h"
#ifndef __DCN20_FPU_H__
#define __DCN20_FPU_H__
@ -31,4 +32,54 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int i);
void dcn20_calculate_dlg_params(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel);
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
bool fast_validate);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks,
unsigned int *uclk_states,
unsigned int num_states);
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context,
bool fast_validate);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int vlevel,
int max_mpc_comb,
int pipe_idx,
bool is_validating_bw);
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
bool dcn21_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context,
bool fast_validate);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
#endif /* __DCN20_FPU_H__ */
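These dcn20_fpu entry points follow the convention documented in dcn10_fpu.c: the caller, in a non-FPU file, takes the FP protection around the call. A minimal hedged sketch of a typical call site (the variable name is illustrative, not code from this patch):

	bool voltage_supported;

	DC_FP_START();
	voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
	DC_FP_END();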

View File

@ -0,0 +1,362 @@
/*
* Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "resource.h"
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn303/dcn303_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn303_fpu.h"
struct _vcs_dpi_ip_params_st dcn3_03_ip = {
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 1,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 0,
.pte_group_size_bytes = 2048,
.num_dsc = 2,
.rob_buffer_size_kbytes = 184,
.det_buffer_size_kbytes = 184,
.dpte_buffer_size_in_pte_reqs_luma = 64,
.dpte_buffer_size_in_pte_reqs_chroma = 34,
.pde_proc_buffer_size_64k_reqs = 48,
.dpp_output_buffer_pixels = 2560,
.opp_output_buffer_lines = 1,
.pixel_chunk_size_kbytes = 8,
.pte_enable = 1,
.max_page_table_levels = 2,
.pte_chunk_size_kbytes = 2, // ?
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 8,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0, // ?
.line_buffer_fixed_bpp = 0, // ?
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
.max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
.writeback_max_hscl_ratio = 1,
.writeback_max_vscl_ratio = 1,
.writeback_min_hscl_ratio = 1,
.writeback_min_vscl_ratio = 1,
.writeback_max_hscl_taps = 1,
.writeback_max_vscl_taps = 1,
.writeback_line_buffer_luma_buffer_size = 0,
.writeback_line_buffer_chroma_buffer_size = 14643,
.cursor_buffer_size = 8,
.cursor_chunk_size = 2,
.max_num_otg = 2,
.max_num_dpp = 2,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
.max_pscl_lb_bw_pix_per_clk = 2,
.max_lb_vscl_bw_pix_per_clk = 4,
.max_vscl_hscl_bw_pix_per_clk = 4,
.max_hscl_ratio = 6,
.max_vscl_ratio = 6,
.hscl_mults = 4,
.vscl_mults = 4,
.max_hscl_taps = 8,
.max_vscl_taps = 8,
.dispclk_ramp_margin_percent = 1,
.underscan_factor = 1.11,
.min_vblank_lines = 32,
.dppclk_delay_subtotal = 46,
.dynamic_metadata_vm_enabled = true,
.dppclk_delay_scl_lb_only = 16,
.dppclk_delay_scl = 50,
.dppclk_delay_cnvc_formatter = 27,
.dppclk_delay_cnvc_cursor = 6,
.dispclk_delay_subtotal = 119,
.dcfclk_cstate_latency = 5.2, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
.max_num_hdmi_frl_outputs = 1,
.odm_combine_4to1_supported = false,
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
.gfx7_compat_tiling_supported = 0,
.number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 562.0,
.dppclk_mhz = 300.0,
.phyclk_mhz = 300.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 405.6,
},
},
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 35.5,
.sr_enter_plus_exit_time_us = 40,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 60.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.max_request_size_bytes = 256,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 156,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3650,
.xfc_bus_transport_time_us = 20, // ?
.xfc_xbuf_latency_tolerance_us = 4, // ?
.use_urgent_burst_bw = 1, // ?
.do_urgent_latency_adjustment = true,
.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};
static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans *
dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
if (optimal_fclk)
*optimal_fclk = bw_from_dram /
(dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes *
(dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
if (optimal_dcfclk)
*optimal_dcfclk = bw_from_dram /
(dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
unsigned int num_dcfclk_sta_targets = 4;
unsigned int num_uclk_states;
dc_assert_fp_enabled();
if (dc->ctx->dc_bios->vram_info.num_chans)
dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
if (bw_params->clk_table.entries[0].memclk_mhz) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}
if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz;
if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
/* Update size of array since we "removed" duplicates */
num_dcfclk_sta_targets = i + 1;
}
num_uclk_states = bw_params->clk_table.num_entries;
/* Calculate optimal dcfclk for each uclk */
for (i = 0; i < num_uclk_states; i++) {
dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz)
optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
/* Calculate optimal uclk for each dcfclk sta target */
for (i = 0; i < num_dcfclk_sta_targets; i++) {
for (j = 0; j < num_uclk_states; j++) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
optimal_uclk_for_dcfclk_sta_targets[i] =
bw_params->clk_table.entries[j].memclk_mhz * 16;
break;
}
}
}
i = 0;
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] =
bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
j = num_uclk_states;
}
}
}
while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
}
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}
dcn3_03_soc.num_states = num_states;
for (i = 0; i < dcn3_03_soc.num_states; i++) {
dcn3_03_soc.clock_limits[i].state = i;
dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
/* Fill all states with max values of all other clocks */
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
/* Populate from bw_params for DTBCLK, SOCCLK */
if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
else
dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
else
dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params; always fill from dcn3_03_soc.clock_limits[0] */
/* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
// WA: patch strobe modes to compensate for DCN303 BW issue
if (dcn3_03_soc.num_chans <= 4) {
for (i = 0; i < dcn3_03_soc.num_states; i++) {
if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
break;
if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
}
}
}
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
}
}
void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
{
dc_assert_fp_enabled();
if (bb_info.dram_clock_change_latency_100ns > 0)
dcn3_03_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;
if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dcn3_03_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;
if (bb_info.dram_sr_exit_latency_100ns > 0)
dcn3_03_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2019 Advanced Micro Devices, Inc.
* Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -19,12 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
#ifndef __DCN303_FPU_H__
#define __DCN303_FPU_H__
void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn303_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
}
#endif /* __DCN303_FPU_H__*/

View File

@ -647,7 +647,9 @@ enum gpio_result dal_ddc_set_config(
void dal_ddc_close(
struct ddc *ddc)
{
dal_gpio_close(ddc->pin_clock);
dal_gpio_close(ddc->pin_data);
if (ddc != NULL) {
dal_gpio_close(ddc->pin_clock);
dal_gpio_close(ddc->pin_data);
}
}

View File

@ -154,6 +154,8 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub);
void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
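The new verify_allow_pstate_change_high hook gives per-ASIC code a way to confirm that P-state allow actually asserts. A hedged sketch of how a caller might use it (the guard and the BREAK_TO_DEBUGGER placement are illustrative, not code from this patch):

	if (hubbub->funcs->verify_allow_pstate_change_high &&
	    !hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		/* pstate allow never asserted; stop here to investigate */
		BREAK_TO_DEBUGGER();
	}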

View File

@ -195,6 +195,7 @@ struct hubp_funcs {
void (*hubp_set_flip_int)(struct hubp *hubp);
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
};
#endif

View File

@ -39,6 +39,7 @@ struct panel_cntl_backlight_registers {
unsigned int BL_PWM_CNTL2;
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
unsigned int PANEL_PWRSEQ_REF_DIV2;
};
struct panel_cntl_funcs {

View File

@ -42,6 +42,7 @@ struct dce_hwseq_wa {
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
bool dp_hpo_and_otg_sequence;
bool wait_hubpret_read_start_during_mpo_transition;
};
struct hwseq_wa_state {

View File

@ -39,6 +39,11 @@ void link_enc_cfg_init(
const struct dc *dc,
struct dc_state *state);
/*
* Copies a link encoder assignment from another state.
*/
void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx);
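/* Hedged usage sketch (new_ctx is an illustrative name, not code from this
 * patch): copy the current assignments into a state about to be validated:
 *
 *	link_enc_cfg_copy(dc->current_state, new_ctx);
 */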
/*
* Algorithm for assigning available DIG link encoders to streams.
*

Some files were not shown because too many files have changed in this diff