Merge tag 'amd-drm-next-6.10-2024-04-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.10-2024-04-19:

amdgpu:
- DC resource allocation logic updates
- DC IPS fixes
- DC YUV fixes
- DMCUB fixes
- DML2 fixes
- Devcoredump updates
- USB-C DSC fix
- Misc display code cleanups
- PSR fixes
- MES timeout fix
- RAS updates
- UAF fix in VA IOCTL
- Fix visible VRAM handling during faults
- Fix IP discovery handling during PCI rescans
- Misc code cleanups
- PSP 14 updates
- More runtime PM code rework
- SMU 14.0.2 support
- GPUVM page fault redirection to secondary IH rings for IH 6.x
- Suspend/resume fixes
- SR-IOV fixes

amdkfd:
- Fix eviction fence handling
- Fix leak in GPU memory allocation failure case
- DMABuf import handling fix

radeon:
- Silence UBSAN warnings related to flexible arrays

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240419224332.2938259-1-alexander.deucher@amd.com
Commit 377b5b397d by Dave Airlie, 2024-04-22 10:51:39 +10:00
75 changed files with 4594 additions and 349 deletions

View File

@ -1409,6 +1409,7 @@ bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
int amdgpu_device_supports_baco(struct drm_device *dev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);

View File

@ -753,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
int error_code;
struct amdgpu_aca *aca = &adev->aca;
const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
return error_code & 0xff;
}
break;
default:
break;
}
if (!smu_funcs || !smu_funcs->parse_error_code)
return -EOPNOTSUPP;
/* NOTE: the true error code is encoded in status.errorcode[0:7] */
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
return error_code & 0xff;
return smu_funcs->parse_error_code(adev, bank);
}
int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
@ -780,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank
return -EINVAL;
error_code = aca_bank_get_error_code(adev, bank);
if (error_code < 0)
return error_code;
for (i = 0; i < size; i++) {
if (err_codes[i] == error_code)
return 0;

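
The refactor above drops the hard-coded MP1 13.0.6 decode in favor of a per-SMU parse_error_code callback (added to aca_smu_funcs in the next hunk). A minimal sketch of what a backend implementation might look like, modeled on the removed inline logic; the function name is hypothetical:

static int smu_v13_0_6_parse_error_code(struct amdgpu_device *adev,
                                        struct aca_bank *bank)
{
        int error_code;

        /* Newer firmware reports the code via the SYND register; otherwise
         * the true error code is encoded in status.errorcode[0:7].
         */
        if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
                error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
        else
                error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);

        return error_code & 0xff;
}

The backend then wires .parse_error_code into its aca_smu_funcs table; aca_bank_get_error_code() now returns -EOPNOTSUPP when no callback is registered.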
View File

@ -173,6 +173,7 @@ struct aca_smu_funcs {
int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count);
int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank);
int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank);
};
struct amdgpu_aca {

View File

@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
/* Validate BOs and map them to GPUVM (update VM page tables). */
/* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
}
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
if (ret) {
pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
}
/* Update mappings managed by KFD. */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct kfd_mem_attachment *attachment;
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
if (ret) {
pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
}
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {

View File

@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {

View File

@ -188,10 +188,11 @@ static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev,
adev->vpe.feature_version, adev->vpe.fw_version);
drm_printf(p, "\nVBIOS Information\n");
drm_printf(p, "name: %s\n", ctx->name);
drm_printf(p, "pn %s\n", ctx->vbios_pn);
drm_printf(p, "version: %s\n", ctx->vbios_ver_str);
drm_printf(p, "date: %s\n", ctx->date);
drm_printf(p, "vbios name : %s\n", ctx->name);
drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
drm_printf(p, "vbios version : %d\n", ctx->version);
drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
drm_printf(p, "vbios date : %s\n", ctx->date);
}
static ssize_t

View File

@ -350,6 +350,81 @@ int amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
struct drm_device *dev;
int bamaco_support;
dev = adev_to_drm(adev);
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
bamaco_support = amdgpu_device_supports_baco(dev);
switch (amdgpu_runtime_pm) {
case 2:
if (bamaco_support & MACO_SUPPORT) {
adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
} else if (bamaco_support == BACO_SUPPORT) {
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n");
}
break;
case 1:
if (bamaco_support & BACO_SUPPORT) {
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
dev_info(adev->dev, "Forcing BACO for runtime pm\n");
}
break;
case -1:
case -2:
if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
} else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
if (!bamaco_support)
goto no_runtime_pm;
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
/* BACO is not supported on vega20 and arcturus */
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
if (!adev->gmc.noretry)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
if (bamaco_support & MACO_SUPPORT) {
adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
dev_info(adev->dev, "Using BAMACO for runtime pm\n");
} else {
dev_info(adev->dev, "Using BACO for runtime pm\n");
}
}
}
break;
case 0:
dev_info(adev->dev, "runtime pm is manually disabled\n");
break;
default:
break;
}
no_runtime_pm:
if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
dev_info(adev->dev, "Runtime PM not available\n");
}
/**
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
@ -1460,7 +1535,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space,please check!!\n");
DRM_WARN("System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@ -5282,7 +5357,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
amdgpu_reset_reg_dumps(tmp_adev);
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
@ -5355,7 +5432,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
amdgpu_coredump(tmp_adev, vram_lost, reset_context);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
amdgpu_coredump(tmp_adev, vram_lost, reset_context);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");

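
Note that amdgpu_device_supports_baco() now effectively returns a capability mask rather than a boolean, which is why the code above tests bamaco_support & MACO_SUPPORT separately from plain BACO support. A rough summary of the module-parameter semantics implemented by the switch, with the flag values assumed here for illustration only (the real definitions live in the PM headers):

/* Assumed flag layout, illustrative: */
#define BACO_SUPPORT  (1 << 0)   /* Bus Active, Chip Off             */
#define MACO_SUPPORT  (1 << 1)   /* Memory Active, Chip Off (BAMACO) */

/* amdgpu_runtime_pm:
 *    2    force BAMACO, fall back to BACO if MACO is unavailable
 *    1    force BACO
 *   -1/-2 auto: prefer PX (ATPX), then BOCO, then BACO/BAMACO
 *    0    runtime PM manually disabled
 */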
View File

@ -255,7 +255,6 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint64_t vram_size;
u32 msg;
int i, ret = 0;
int ip_discovery_ver = 0;
/* It can take up to a second for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
@ -265,17 +264,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
ip_discovery_ver = RREG32(mmIP_DISCOVERY_VERSION);
if ((dev_is_removable(&adev->pdev->dev)) ||
(ip_discovery_ver == IP_DISCOVERY_V2) ||
(ip_discovery_ver == IP_DISCOVERY_V4)) {
for (i = 0; i < 1000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
msleep(1);
}
for (i = 0; i < 1000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
usleep_range(1000, 1100);
}
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) {
@ -1906,6 +1901,8 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:

View File

@ -2481,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
/* Use a common context, just need to make sure full reset is done */
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
r = amdgpu_do_asic_reset(&device_list, &reset_context);
if (r) {

View File

@ -133,7 +133,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
int bamaco_support = 0;
int r, acpi_status;
dev = adev_to_drm(adev);
@ -150,52 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
if (amdgpu_device_supports_px(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
} else if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_runtime_pm != 0) {
bamaco_support = amdgpu_device_supports_baco(dev);
if (!bamaco_support)
goto no_runtime_pm;
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
/* enable BACO as runpm mode if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
if (!adev->gmc.noretry)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
if (bamaco_support & MACO_SUPPORT) {
adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
dev_info(adev->dev, "Using BAMACO for runtime pm\n");
} else {
dev_info(adev->dev, "Using BACO for runtime pm\n");
}
}
}
no_runtime_pm:
if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
dev_info(adev->dev, "NO pm mode for runtime pm\n");
amdgpu_device_detect_runtime_pm_mode(adev);
/* Call ACPI methods: require modeset init
* but failure is not fatal

View File

@ -623,8 +623,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@ -1278,23 +1277,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@ -1395,10 +1396,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
if (amdgpu_bo_in_cpu_visible_vram(abo))
if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@ -1421,7 +1419,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
!amdgpu_bo_in_cpu_visible_vram(abo))
!amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@ -1585,6 +1583,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@ -1593,10 +1592,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";

View File

@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
* amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
*/
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
amdgpu_res_next(&cursor, cursor.size);
}
return false;
}
/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/

View File

@ -2265,6 +2265,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
(psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
ret = psp_bootloader_load_ipkeymgr_drv(psp);
if (ret) {
dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@ -3280,6 +3289,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras_drv.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ipkeymgr_drv.start_addr = ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;

View File

@ -73,8 +73,10 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV,
PSP_BL__LOAD_INTFDRV = 0xD0000,
PSP_BL__LOAD_RASDRV = 0xE0000,
PSP_BL__LOAD_RASDRV = 0xE0000,
PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@ -117,6 +119,7 @@ struct psp_funcs {
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@ -336,6 +339,7 @@ struct psp_context {
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
struct psp_bin_desc ipkeymgr_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@ -424,6 +428,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ras_drv(psp) \
((psp)->funcs->bootloader_load_ras_drv ? \
(psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
#define psp_bootloader_load_ipkeymgr_drv(psp) \
((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
(psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \

View File

@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
AMDGPU_SKIP_COREDUMP = 2,
};
struct amdgpu_reset_context {

View File

@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@ -403,40 +403,55 @@ error:
return r;
}
/**
* amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
* @adev: amdgpu device
* @res: the resource to check
*
* Returns: true if the full resource is CPU visible, false otherwise.
*/
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res)
{
struct amdgpu_res_cursor cursor;
if (!res)
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
res->mem_type == AMDGPU_PL_PREEMPT)
return true;
if (res->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
return false;
amdgpu_res_next(&cursor, cursor.size);
}
return true;
}
/*
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
* amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
*
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
return true;
if (mem->mem_type != TTM_PL_VRAM)
if (!amdgpu_res_cpu_visible(adev, mem))
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
end = cursor.start + cursor.size;
while (cursor.remaining) {
amdgpu_res_next(&cursor, cursor.size);
/* ttm_resource_ioremap only supports contiguous memory */
if (mem->mem_type == TTM_PL_VRAM &&
!(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
return false;
if (!cursor.remaining)
break;
/* ttm_resource_ioremap only supports contiguous memory */
if (end != cursor.start)
return false;
end = cursor.start + cursor.size;
}
return end <= adev->gmc.visible_vram_size;
return true;
}
/*
@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
if (!amdgpu_res_copyable(adev, old_mem) ||
!amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}

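
Two things are worth noting about the new helper. First, amdgpu_res_cpu_visible() inverts the old semantics: amdgpu_bo_in_cpu_visible_vram() returned true if any chunk of the BO started below visible_vram_size, while the new check only returns true when every chunk ends inside the CPU-visible window. Second, GTT/system/preempt resources now count as visible, so callers no longer need a separate mem_type check for those. A sketch of the converted caller pattern, following the fault-handler hunk above:

/* bo: struct amdgpu_bo *, adev: struct amdgpu_device * (illustrative) */
if (bo->tbo.resource->mem_type == TTM_PL_VRAM &&
    !amdgpu_res_cpu_visible(adev, bo->tbo.resource))
        return VM_FAULT_SIGBUS;   /* CPU touched unmappable VRAM */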
View File

@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,

View File

@ -125,6 +125,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
PSP_FW_TYPE_MAX_INDEX,
};

View File

@ -1647,6 +1647,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -1673,21 +1704,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -1740,17 +1764,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -1764,7 +1780,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -1851,10 +1867,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);

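
The old validation in each map path computed saddr + size directly and could wrap; amdgpu_vm_verify_parameters() centralizes the checks using check_add_overflow(), which wraps the compiler's overflow-checking addition. A self-contained user-space analogue of the idiom, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Reject ranges whose end wraps around; size == 0 is also invalid
 * because the end would precede the start.
 */
static bool range_valid(uint64_t saddr, uint64_t size, uint64_t max)
{
        uint64_t end;

        if (size == 0 || __builtin_add_overflow(saddr, size, &end))
                return false;

        return end - 1 < max;   /* last byte must stay below the limit */
}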
View File

@ -630,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
u32 mask, inst_mask = adev->sdma.sdma_mask;
u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
int ret, i;
/* generally 1 AID supports 4 instances */
@ -642,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
if ((inst_mask & mask) == mask)
avail_inst = inst_mask & mask;
if (avail_inst == mask || avail_inst == 0x3 ||
avail_inst == 0xc)
adev->aid_mask |= (1 << i);
}

View File

@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle)
gfx_v11_0_set_safe_mode(adev, 0);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle)
for (i = 0; i < adev->gfx.me.num_me; ++i) {
for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
soc21_grbm_select(adev, i, k, j, 0);
WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
}
}
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
r = gfx_v11_0_request_gfx_index_mutex(adev, 1);

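
The soft-reset rewrite replaces the open-coded GRBM_GFX_CNTL programming with soc21_grbm_select(), and adds the previously missing restore to the default selection (soc21_grbm_select(adev, 0, 0, 0, 0)) before dropping srbm_mutex. Judging from the removed lines, the helper performs roughly the following, plus a VMID field for the final argument; me/pipe/queue map to the loop variables i/k/j above:

/* Sketch of what the removed open-coded sequence did (illustrative): */
tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, me);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, queue);
WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);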
View File

@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
/* Redirect the interrupts to IH RB1 for dGPU */
if (adev->irq.ih1.ring_size) {
tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
SOURCE_ID_MATCH_ENABLE, 0x1);
WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
}
pci_set_master(adev->pdev);
/* enable interrupts */
@ -549,8 +564,15 @@ static int ih_v6_0_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
use_bus_addr);
if (r)
return r;
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
}
/* initialize ih control register offset */
ih_v6_0_init_register_offset(adev);

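
The redirect programs client-config slot 0 so that interrupts from client 0xa with source ID 0 (the GPUVM fault source) are steered to IH ring 1, keeping page-fault storms off the primary ring. Using the field layout from the register headers at the end of this series (CLIENT_ID in bits [7:0], SOURCE_ID in bits [15:8], SOURCE_ID_MATCH_ENABLE at bit 16), the value written to IH_RING1_CLIENT_CFG_DATA works out to:

uint32_t data = (0x0a <<  0)   /* CLIENT_ID = 0xa            */
              | (0x00 <<  8)   /* SOURCE_ID = 0              */
              | (0x01 << 16);  /* SOURCE_ID_MATCH_ENABLE = 1 */
/* data == 0x0001000a */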
View File

@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev)
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
/* Redirect the interrupts to IH RB1 for dGPU */
if (adev->irq.ih1.ring_size) {
tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
SOURCE_ID_MATCH_ENABLE, 0x1);
WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
}
pci_set_master(adev->pdev);
/* enable interrupts */
@ -550,8 +565,15 @@ static int ih_v6_1_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
use_bus_addr);
if (r)
return r;
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
}
/* initialize ih control register offset */
ih_v6_1_init_register_offset(adev);

View File

@ -111,7 +111,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
unsigned long flags;
signed long timeout = adev->usec_timeout;
signed long timeout = 3000000; /* 3000 ms */
if (amdgpu_emu_mode) {
timeout *= 100;

View File

@ -446,8 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
amdgpu_virt_fini_data_exchange(adev);
xgpu_nv_send_access_requests_with_param(adev,
IDH_RAS_POISON, block, 0, 0);
if (block != AMDGPU_RAS_BLOCK__SDMA)
amdgpu_virt_init_data_exchange(adev);
}
}

View File

@ -169,7 +169,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp)
static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
/* dbg_drv was renamed to had_drv in psp v14 */
return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV);
}
static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
@ -177,6 +178,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp)
{
return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV);
}
static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
{
@ -653,6 +658,7 @@ static const struct psp_funcs psp_v14_0_funcs = {
.bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv,
.bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv,
.bootloader_load_sos = psp_v14_0_bootloader_load_sos,
.ring_create = psp_v14_0_ring_create,
.ring_stop = psp_v14_0_ring_stop,

View File

@ -144,7 +144,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
int old_poison;
uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@ -163,17 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE2SH:
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
if (ret)
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__MMHUB;
if (ret)
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
@ -184,22 +180,15 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
break;
dev_warn(dev->adev->dev,
"client %d does not support poison consumption\n", client_id);
return;
}
kfd_signal_poison_consumed_event(dev, pasid);
/* resetting queue passes, do page retirement without gpu reset
* resetting queue fails, fallback to gpu reset solution
*/
if (!ret)
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
else
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
dev_warn(dev->adev->dev,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}

View File

@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
if (!ef)
return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
if (!signal_eviction_fence(p))
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
else
if (signal_eviction_fence(p) ||
mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);

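
The scheduling fix hinges on the return-value semantics of mod_delayed_work(): it returns false when it queued previously idle work, and true when the work was already pending and only its timer was updated. So the restore worker is queued exactly once, and if an eviction finds a restore already pending (or has no fence to signal), the queues are restored directly instead of being left stopped:

/* Annotated copy of the new control flow above (illustrative): */
if (signal_eviction_fence(p) ||          /* no fence, or signal failed   */
    mod_delayed_work(kfd_restore_wq,     /* true: restore already queued */
                     &p->restore_work,
                     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
        kfd_process_restore_queues(p);   /* unblock the queues right now */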
View File

@ -1230,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
break;
}
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
break;
default:
break;
}
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@ -3037,6 +3046,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {

View File

@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
if (ctx->dce_version == DCN_VERSION_2_01) {
dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}

View File

@ -29,6 +29,7 @@
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@ -829,7 +830,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
if (dc->config.enable_auto_dpm_test_logs) {
dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}

View File

@ -3446,6 +3446,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (srf_updates[i].surface->flip_immediate)
continue;
update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
sizeof(flip_addr->dirty_rects));
@ -5042,8 +5043,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
void dc_power_down_on_boot(struct dc *dc)
{
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
dc->hwss.power_down_on_boot)
dc->hwss.power_down_on_boot) {
if (dc->caps.ips_support)
dc_exit_ips_for_hw_access(dc);
dc->hwss.power_down_on_boot(dc);
}
}
void dc_set_power_state(

View File

@ -1500,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return false;
}
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
/* Timing borders are part of vactive that we are also supposed to skip in addition
* to any stream dst offset. Since dm logic assumes dst is in addressable
* space we need to add the left and top borders to dst offsets temporarily.
@ -1514,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
/* depends on h_active */
calculate_recout(pipe_ctx);

View File

@ -191,7 +191,7 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_configuration_options dml2_opt = dc->dml2_options;
struct dml2_configuration_options *dml2_opt = &dc->dml2_options;
#endif
struct dc_state *state = kvzalloc(sizeof(struct dc_state),
GFP_KERNEL);
@ -205,11 +205,11 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
dml2_opt.use_clock_dc_limits = false;
dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2);
dml2_opt->use_clock_dc_limits = false;
dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
dml2_opt.use_clock_dc_limits = true;
dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2_dc_power_source);
dml2_opt->use_clock_dc_limits = true;
dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
}
#endif

View File

@ -495,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
int i = 0, j = 0;
unsigned int i, j;
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;

View File

@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.280"
#define DC_VER "3.2.281"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -309,12 +309,12 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
//These bitfields to be used starting with DCN
//These bitfields to be used starting with DCN 3.0
struct {
uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
uint32_t dcc_256_128_128 : 1; //available starting with DCN
uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
} dcc_controls;
};
@ -1003,9 +1003,9 @@ struct dc_debug_options {
unsigned int static_screen_wait_frames;
bool force_chroma_subsampling_1tap;
bool disable_422_left_edge_pixel;
unsigned int force_cositing;
};
struct gpu_info_soc_bounding_box_v1_0;
/* Generic structure that can be used to query properties of DC. More fields
* can be added as required.
@ -1285,6 +1285,7 @@ struct dc_plane_state {
struct tg_color visual_confirm_color;
bool is_statically_allocated;
enum chroma_cositing cositing;
};
struct dc_plane_info {
@ -1303,6 +1304,7 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
enum chroma_cositing cositing;
};
#include "dc_stream.h"

View File

@ -738,6 +738,13 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
enum chroma_cositing {
CHROMA_COSITING_NONE,
CHROMA_COSITING_LEFT,
CHROMA_COSITING_TOPLEFT,
CHROMA_COSITING_COUNT
};
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,

View File

@ -1050,6 +1050,8 @@ union replay_error_status {
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
/* Replay caps support DPCD & EDID caps*/
bool replay_cap_support;
/* Power opt flags that are supported */
unsigned int replay_power_opt_supported;
/* SMU optimization is supported */

View File

@ -22,9 +22,6 @@
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
@ -315,9 +312,6 @@ static bool setup_engine(
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
/*set SW requested I2c speed to default, if API calls in it will be override later*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);

View File

@ -167,7 +167,6 @@ struct dcn10_link_enc_registers {
uint32_t DIO_LINKD_CNTL;
uint32_t DIO_LINKE_CNTL;
uint32_t DIO_LINKF_CNTL;
uint32_t DIG_FIFO_CTRL0;
uint32_t DIO_CLK_CNTL;
uint32_t DIG_BE_CLK_CNTL;
};
@ -475,9 +474,6 @@ struct dcn10_link_enc_registers {
type HPO_DP_ENC_SEL;\
type HPO_HDMI_ENC_SEL
#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_FIFO_OUTPUT_PIXEL_MODE
#define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_BE_ENABLE;\
type DIG_RB_SWITCH_EN;\
@ -512,7 +508,6 @@ struct dcn10_link_enc_shift {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
};
@ -521,7 +516,6 @@ struct dcn10_link_enc_mask {
DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
};

View File

@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding(
case PIXEL_ENCODING_RGB:
case PIXEL_ENCODING_YCBCR444:
REG_UPDATE_3(FMT_CONTROL,
FMT_PIXEL_ENCODING, 0,
FMT_SUBSAMPLING_MODE, 0,
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding(
FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
REG_UPDATE_3(FMT_CONTROL,
FMT_PIXEL_ENCODING, 2,
FMT_SUBSAMPLING_MODE, 2,
FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
break;
default:
break;

View File

@ -79,6 +79,8 @@
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \
OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \

View File

@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_1;
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
uint32_t DIG_FE_CNTL2;
uint32_t DIG_FIFO_STATUS;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP11_ENABLE;\
type DP_SEC_GSP11_LINE_NUM
#define SE_REG_FIELD_LIST_DCN3_2(type) \
#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \
type DIG_FIFO_OUTPUT_PIXEL_MODE;\
type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\
type DIG_SYMCLK_FE_ON;\
@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift {
uint8_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
SE_REG_FIELD_LIST_DCN3_0(uint8_t);
SE_REG_FIELD_LIST_DCN3_2(uint8_t);
SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t);
};
@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask {
uint32_t HDMI_ACP_SEND;
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
SE_REG_FIELD_LIST_DCN3_0(uint32_t);
SE_REG_FIELD_LIST_DCN3_2(uint32_t);
SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t);
SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t);
};
@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
void enc1_stream_encoder_reset_fifo(
struct stream_encoder *enc);
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);

View File

@ -147,7 +147,7 @@
uint32_t DCN_CUR1_TTU_CNTL1;\
uint32_t VMID_SETTINGS_0
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\

View File

@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl(
MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
}
REG_SEQ_SUBMIT();
PERF_TRACE();
REG_SEQ_WAIT_DONE();
PERF_TRACE();
}
static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
REG_SEQ_SUBMIT();
PERF_TRACE();
REG_SEQ_WAIT_DONE();
PERF_TRACE();
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)

View File

@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
/* no need to program PTE */
REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
@ -99,6 +100,10 @@ static void hubp201_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
{
/*
 * otg is locked when this func is called. Registers are double buffered,
 * so disabling the requestors is not needed.
 */
hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
hubp201_program_requestor(hubp, rq_regs);
hubp201_program_deadline(hubp, dlg_attr, ttu_attr);

View File

@ -30,6 +30,10 @@
#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\
LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\
LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\
LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
@ -44,7 +48,15 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
#define DPCS_DCN201_REG_LIST(id) \
DPCS_DCN2_CMN_REG_LIST(id)
DPCS_DCN2_CMN_REG_LIST(id), \
SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \
SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \
SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id)
void dcn201_link_encoder_construct(
struct dcn20_link_encoder *enc20,

View File

@ -29,13 +29,6 @@
#include "dcn20/dcn20_dccg.h"
#define DCCG_REG_LIST_DCN3AG() \
DCCG_COMMON_REG_LIST_DCN_BASE(),\
SR(PHYASYMCLK_CLOCK_CNTL),\
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL)
#define DCCG_REG_LIST_DCN30() \
DCCG_REG_LIST_DCN2(),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
@ -46,17 +39,6 @@
SR(PHYBSYMCLK_CLOCK_CNTL),\
SR(PHYCSYMCLK_CLOCK_CNTL)
#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \
DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
DCCG_MASK_SH_LIST_DCN2(mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\

View File

@ -29,7 +29,6 @@
#include "reg_helper.h"
#include "hw_shared.h"
#include "dc.h"
#include "core_types.h"
#define DC_LOGGER \
enc1->base.ctx->logger

View File

@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
.dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
.dwb_set_scaler = NULL,
};
void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,

View File

@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
};

View File

@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;

View File

@ -183,6 +183,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@ -237,8 +239,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,

View File

@ -395,7 +395,9 @@ void dpp3_set_cursor_attributes(
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
cur_rom_en = 1;
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,

View File

@ -116,10 +116,10 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
.update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn351_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate,
.hw_block_power_up = dcn351_hw_block_power_up,
.hw_block_power_down = dcn351_hw_block_power_down,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
.hw_block_power_up = dcn35_hw_block_power_up,
.hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
};

View File

@ -2547,7 +2547,7 @@ struct resource_pool *dcn32_create_resource_pool(
* full update which delays the flip for 1 frame. If we use the original pipe
* we don't have to toggle its power. So we can flip faster.
*/
static int find_optimal_free_pipe_as_secondary_dpp_pipe(
int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
const struct resource_pool *pool,
@ -2730,7 +2730,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
if (free_pipe_idx >= 0) {

View File

@ -137,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *new_opp_head);
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,

View File

@ -758,7 +758,7 @@ static const struct dc_debug_options debug_defaults_drv = {
//must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
.enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.disable_z10 = true,
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
.ips2_eval_delay_us = 2000,
@ -1722,13 +1722,12 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
return out;
DC_FP_START();
dcn351_decide_zstate_support(dc, context);
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
return out;
}
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,

View File

@ -297,6 +297,7 @@ struct dmub_srv_hw_params {
bool dpia_hpd_int_enable_supported;
bool disable_clock_gate;
bool disallow_dispclk_dppclk_ds;
bool ips_sequential_ono;
enum dmub_memory_access_type mem_access_type;
enum dmub_ips_disable_type disable_ips;
};

View File

@ -1614,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
uint8_t pad[1];
uint8_t reserved[59];
};
/**
@ -2335,6 +2335,11 @@ enum phy_link_rate {
* UHBR10 - 20.0 Gbps/Lane
*/
PHY_RATE_2000 = 11,
/**
 * Rate 12 - 6.75 Gbps/Lane
 */
PHY_RATE_675 = 12,
};
/**
@ -3062,6 +3067,11 @@ enum dmub_cmd_replay_type {
* Set pseudo vtotal
*/
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
/**
* Set adaptive sync sdp enabled
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
};
/**
@ -3263,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal {
*/
uint8_t pad;
};
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
/**
* Panel instance.
* Panel instance to identify which replay_state to use.
* Currently only 0 or 1 is supported.
*/
uint8_t panel_inst;
/**
* force_disabled: when set, force the adaptive sync SDP to disabled
*/
uint8_t force_disabled;
uint8_t pad[2];
};
/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
@ -3366,6 +3390,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal {
struct dmub_cmd_replay_set_pseudo_vtotal data;
};
/**
* Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
*/
struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
/**
* Command header.
*/
struct dmub_cmd_header header;
/**
* Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data;
};
/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
@ -3421,6 +3459,11 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
*/
struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
/**
* Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
};
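For context, a hypothetical sketch of how a caller would fill and submit the new replay command; dc_wake_and_execute_dmub_cmd() and DM_DMUB_WAIT_TYPE_WAIT are borrowed from the existing replay command paths, and panel_inst is an assumed local:

union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.replay_disabled_adaptive_sync_sdp.header.type = DMUB_CMD__REPLAY;
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
	DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
	sizeof(struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data);
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst = panel_inst;
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled = 1;

dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);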
/**
@ -4096,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type {
* Queries backlight info for the embedded panel.
*/
DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1,
/**
* Sets the PWM Freq as per user's requirement.
*/
DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2,
};
/**
@ -4667,6 +4714,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
*/
struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
/**
* Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
*/
struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp;
/**
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/

View File

@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds;
boot_options.bits.disable_clk_gate = params->disable_clock_gate;
boot_options.bits.ips_disable = params->disable_ips;
boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

View File

@ -237,6 +237,10 @@
#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
#define regIH_CLIENT_CFG 0x0184
#define regIH_CLIENT_CFG_BASE_IDX 0
#define regIH_RING1_CLIENT_CFG_INDEX 0x0185
#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
#define regIH_RING1_CLIENT_CFG_DATA 0x0186
#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
#define regIH_CLIENT_CFG_INDEX 0x0188
#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
#define regIH_CLIENT_CFG_DATA 0x0189

View File

@ -888,6 +888,16 @@
//IH_CLIENT_CFG
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL
//IH_RING1_CLIENT_CFG_INDEX
#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
//IH_RING1_CLIENT_CFG_DATA
#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
//IH_CLIENT_CFG_INDEX
#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
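These index/data registers back the GPUVM page fault redirection to IH ring 1 mentioned in the changelog. A hypothetical sketch of programming one routing entry; the helper name and OSSSYS block usage are assumptions modeled on the existing IH code, not taken from this diff:

static void ih_v6_route_client_to_ring1(struct amdgpu_device *adev, u32 slot,
					u32 client_id, u32 source_id)
{
	u32 data = 0;

	data = REG_SET_FIELD(data, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, client_id);
	data = REG_SET_FIELD(data, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, source_id);
	data = REG_SET_FIELD(data, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID_MATCH_ENABLE, 1);

	/* select the routing slot, then write the match entry */
	WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, slot);
	WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, data);
}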

View File

@ -45,6 +45,7 @@
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"
/*
@ -715,6 +716,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 1):
smu_v14_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
smu_v14_0_2_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,140 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU_V14_0_2_PPSMC_H
#define SMU_V14_0_2_PPSMC_H
#define PPSMC_VERSION 0x1
// SMU Response Codes:
#define PPSMC_Result_OK 0x1
#define PPSMC_Result_Failed 0xFF
#define PPSMC_Result_UnknownCmd 0xFE
#define PPSMC_Result_CmdRejectedPrereq 0xFD
#define PPSMC_Result_CmdRejectedBusy 0xFC
// Message Definitions:
// BASIC
#define PPSMC_MSG_TestMessage 0x1
#define PPSMC_MSG_GetSmuVersion 0x2
#define PPSMC_MSG_GetDriverIfVersion 0x3
#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
#define PPSMC_MSG_EnableAllSmuFeatures 0x6
#define PPSMC_MSG_DisableAllSmuFeatures 0x7
#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC
#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD
#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
#define PPSMC_MSG_SetDriverDramAddrLow 0xF
#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
#define PPSMC_MSG_SetToolsDramAddrLow 0x11
#define PPSMC_MSG_TransferTableSmu2Dram 0x12
#define PPSMC_MSG_TransferTableDram2Smu 0x13
#define PPSMC_MSG_UseDefaultPPTable 0x14
//BACO/BAMACO/BOMACO
#define PPSMC_MSG_EnterBaco 0x15
#define PPSMC_MSG_ExitBaco 0x16
#define PPSMC_MSG_ArmD3 0x17
#define PPSMC_MSG_BacoAudioD3PME 0x18
//DPM
#define PPSMC_MSG_SetSoftMinByFreq 0x19
#define PPSMC_MSG_SetSoftMaxByFreq 0x1A
#define PPSMC_MSG_SetHardMinByFreq 0x1B
#define PPSMC_MSG_SetHardMaxByFreq 0x1C
#define PPSMC_MSG_GetMinDpmFreq 0x1D
#define PPSMC_MSG_GetMaxDpmFreq 0x1E
#define PPSMC_MSG_GetDpmFreqByIndex 0x1F
#define PPSMC_MSG_OverridePcieParameters 0x20
//DramLog Set DramAddr
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21
#define PPSMC_MSG_DramLogSetDramAddrLow 0x22
#define PPSMC_MSG_DramLogSetDramSize 0x23
#define PPSMC_MSG_SetWorkloadMask 0x24
#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed
#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed
#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
//Power Gating
#define PPSMC_MSG_AllowGfxOff 0x28
#define PPSMC_MSG_DisallowGfxOff 0x29
#define PPSMC_MSG_PowerUpVcn 0x2A
#define PPSMC_MSG_PowerDownVcn 0x2B
#define PPSMC_MSG_PowerUpJpeg 0x2C
#define PPSMC_MSG_PowerDownJpeg 0x2D
//Resets
#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
#define PPSMC_MSG_Mode1Reset 0x2F
//Set SystemVirtual DramAddrHigh
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31
//ACDC Power Source
#define PPSMC_MSG_SetPptLimit 0x32
#define PPSMC_MSG_GetPptLimit 0x33
#define PPSMC_MSG_ReenableAcDcInterrupt 0x34
#define PPSMC_MSG_NotifyPowerSource 0x35
//BTC
#define PPSMC_MSG_RunDcBtc 0x36
// 0x37
//Others
#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed
#define PPSMC_MSG_SetFwDstatesMask 0x39
#define PPSMC_MSG_SetThrottlerMask 0x3A
#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C
//STB to dram log
#define PPSMC_MSG_DumpSTBtoDram 0x3D
#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E
#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F
#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40
#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41
#define PPSMC_MSG_AllowGfxDcs 0x43
#define PPSMC_MSG_DisallowGfxDcs 0x44
#define PPSMC_MSG_EnableAudioStutterWA 0x45
#define PPSMC_MSG_PowerUpUmsch 0x46
#define PPSMC_MSG_PowerDownUmsch 0x47
#define PPSMC_MSG_SetDcsArch 0x48
#define PPSMC_MSG_TriggerVFFLR 0x49
#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
#define PPSMC_MSG_SetPriorityDeltaGain 0x4C
#define PPSMC_MSG_AllowIHHostInterrupt 0x4D
#define PPSMC_MSG_Mode3Reset 0x4F
#define PPSMC_Message_Count 0x50
#endif
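These IDs are consumed through the common SMU mailbox helpers; a minimal sketch, assuming the ppt message table (in the suppressed smu_v14_0_2_ppt.c diff) maps SMU_MSG_GetSmuVersion onto PPSMC_MSG_GetSmuVersion:

uint32_t smu_version;
int ret;

/* resolves the generic message ID through the ppt map, writes the
 * PPSMC index to the mailbox and reads back the response argument
 */
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
if (ret)
	return ret;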

View File

@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25
#define FEATURE_MASK(feature) (1ULL << feature)
@ -39,7 +39,8 @@
#define MP1_SRAM 0x03c00004
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010028
#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028
#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
#define MAX_DPM_LEVELS 16

View File

@ -0,0 +1,164 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU_14_0_2_PPTABLE_H
#define SMU_14_0_2_PPTABLE_H
#pragma pack(push, 1)
#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
// POWERPLAYTABLE::ulPlatformCaps
#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC needs to show the Powerplay page.
#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS.
#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly.
#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry.
#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry.
#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate.
#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED.
#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive.
// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type
#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0
#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
enum SMU_14_0_2_OD_SW_FEATURE_CAP
{
SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0,
SMU_14_0_2_ODCAP_POWER_MODE = 1,
SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2,
SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3,
SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4,
SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5,
SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6,
SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7,
SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8,
SMU_14_0_2_ODCAP_COUNT = 9,
};
enum SMU_14_0_2_OD_SW_FEATURE_ID
{
SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM
SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode
SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK
SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK
SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK
SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning
SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning
SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning
SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Under Volt VDDSOC
};
#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features
enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID
{
SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0,
SMU_14_0_2_ODSETTING_POWER_MODE = 1,
SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2,
SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3,
SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4,
SMU_14_0_2_ODSETTING_ACTIMING = 5,
SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6,
SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7,
SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8,
SMU_14_0_2_ODSETTING_COUNT = 9,
};
#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings
enum SMU_14_0_2_PWRMODE_SETTING
{
SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0,
SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE,
SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO,
SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE,
SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET,
SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE,
SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO,
SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE,
SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET,
SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE,
SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO,
SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE,
SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET,
SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
};
#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings
enum SMU_14_0_2_overdrive_table_id
{
SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0,
SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1,
SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2,
};
struct smu_14_0_2_overdrive_table
{
uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION
uint8_t reserve[3]; // Zero filled field reserved for future use
uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags
int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings
int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings
int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
};
struct smu_14_0_2_powerplay_table
{
struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
uint8_t table_revision; // PPGen use only: table_revision = 3
uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
uint16_t pmfw_board_table_size; // The size of BoardTable_t.
uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base
uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base
uint16_t format_id; // PPGen use only: PPTable for different ASICs.
uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps
uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER
uint16_t small_power_limit1;
uint16_t small_power_limit2;
uint16_t boost_power_limit; // For Gemini boards, when the slave adapter is in BACO mode, the master adapter uses this boost power limit instead of the default power limit.
uint16_t software_shutdown_temp;
uint8_t reserve[143]; // Zero filled field reserved for future use
struct smu_14_0_2_overdrive_table overdrive_table;
PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
};
#pragma pack(pop)
#endif
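A hypothetical sketch of consuming the packed layout above; the table pointer and variable names are illustrative, only the struct fields come from this header:

const struct smu_14_0_2_powerplay_table *pp_table = atom_table_addr;
const uint8_t *base = (const uint8_t *)pp_table;

/* the PMFW portion starts at the advertised offset rather than at a
 * fixed member offset, so layout growth stays parseable
 */
const PPTable_t *smc_pptable =
	(const PPTable_t *)(base + pp_table->pmfw_pptable_start_offset);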

View File

@ -3118,12 +3118,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
return 0;
}
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
int error_code;
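/* dGPUs with new enough fw report the code in SYND.ErrorInformation;
 * older fw encodes it in status.errorcode[0:7]
 */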
if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
return error_code & 0xff;
}
static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.max_ue_bank_count = 12,
.max_ce_bank_count = 12,
.set_debug_mode = aca_smu_set_debug_mode,
.get_valid_aca_count = aca_smu_get_valid_aca_count,
.get_valid_aca_bank = aca_smu_get_valid_aca_bank,
.parse_error_code = aca_smu_parse_error_code,
};
static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,

View File

@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o
SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o
AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR))

View File

@ -38,8 +38,13 @@
#include "amdgpu_ras.h"
#include "smu_cmn.h"
#include "asic_reg/mp/mp_14_0_0_offset.h"
#include "asic_reg/mp/mp_14_0_0_sh_mask.h"
#include "asic_reg/mp/mp_14_0_2_offset.h"
#include "asic_reg/mp/mp_14_0_2_sh_mask.h"
#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
/*
* DO NOT use these for err/warn/info/debug messages.
@ -52,6 +57,7 @@
#undef pr_debug
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
@ -59,7 +65,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char fw_name[30];
char ucode_prefix[15];
char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu)
int smu_v14_0_load_microcode(struct smu_context *smu)
{
#if 0
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
const struct smc_firmware_header_v1_0 *hdr;
@ -131,8 +136,12 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
for (i = 0; i < adev->usec_timeout; i++) {
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
else
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
break;
@ -142,9 +151,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu)
if (i == adev->usec_timeout)
return -ETIME;
#endif
return 0;
}
int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
@ -165,6 +172,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
return 0;
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@ -198,7 +209,11 @@ int smu_v14_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
mp1_fw_flags = RREG32_PCIE(MP1_Public |
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
else
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
@ -227,16 +242,16 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
adev->pm.fw_version = smu_version;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
amdgpu_ip_version(adev, MP1_HWIP, 0));
@ -738,9 +753,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
struct amdgpu_device *adev = smu->adev;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@ -841,9 +856,15 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
} else {
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
@ -851,14 +872,25 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
// TODO
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) {
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
} else {
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
}
break;
default:
@ -868,11 +900,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
static int smu_v14_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
// TODO
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
schedule_delayed_work(&smu->swctf_delayed_work,
msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
dev_emerg(adev->dev, "ERROR: unhandled THM interrupt src id (%d)\n",
src_id);
break;
}
}
return 0;
}
@ -894,7 +947,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
irq_src->num_types = 1;
irq_src->funcs = &smu_v14_0_irq_funcs;
// TODO: THM related
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
THM_11_0__SRCID__THM_DIG_THERM_L2H,
irq_src);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
THM_11_0__SRCID__THM_DIG_THERM_H2L,
irq_src);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
SMU_IH_INTERRUPT_ID_TO_DRIVER,

File diff suppressed because it is too large

View File

@ -0,0 +1,28 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __SMU_V14_0_2_PPT_H__
#define __SMU_V14_0_2_PPT_H__
extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu);
#endif

View File

@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
//size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
//size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
UCHAR clockInfoIndex[1];
UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
ATOM_PPLIB_STATE_V2 states[1];
ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[1];
UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
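With the arrays annotated by __counted_by(), allocations size the trailing storage with struct_size() instead of the old sizeof() + (n - 1) idiom; a minimal sketch with illustrative names:

ATOM_PPLIB_STATE_V2 *state;

state = kzalloc(struct_size(state, clockInfoIndex, num_levels), GFP_KERNEL);
if (!state)
	return -ENOMEM;
/* set the counter before touching the array so fortified accesses
 * see a valid bound
 */
state->ucNumDPMLevels = num_levels;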

View File

@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
ATOM_CONNECTOR_INFO_I2C ci;
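/* rev 2+ tables carry connector info in the 2.1 layout */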
if (frev > 1)
ci = supported_devices->info_2d1.asConnInfo[i];
else
ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;