Mirror of https://github.com/torvalds/linux.git
Merge tag 'amd-drm-next-6.8-2023-12-01' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.8-2023-12-01:

amdgpu:
- Add new 64 bit sequence number infrastructure.
  This will ultimately be used for user queue synchronization.
- GPUVM updates
- Misc code cleanups
- RAS updates
- DCN 3.5 updates
- Rework PCIe link speed handling
- Document GPU reset types
- DMUB fixes
- eDP fixes
- NBIO 7.9 updates
- NBIO 7.11 updates
- SubVP updates
- DCN 3.1.4 fixes
- ABM fixes
- AGP aperture fix
- DCN 3.1.5 fix
- Fix some potential error path memory leaks
- Enable PCIe PMEs
- Add XGMI, PCIe state dumping for aqua vanjaram
- GFX11 golden register updates
- Misc display fixes

amdkfd:
- Migrate TLB flushing logic to amdgpu
- Trap handler fixes
- Fix restore workers handling on suspend and reset
- Fix possible memory leak in pqm_uninit()

radeon:
- Fix some possible overflows in command buffer checking
- Check for errors in ring_lock

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231201181743.5313-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in: commit 5edfd7d94b
@@ -75,3 +75,44 @@ change in real-time by using something like::

 When reporting a bug related to DC, consider attaching this log before and
 after you reproduce the bug.
+
+DMUB Firmware Debug
+===================
+
+Sometimes, dmesg logs aren't enough. This is especially true if a feature is
+implemented primarily in DMUB firmware. In such cases, all we see in dmesg when
+an issue arises is some generic timeout error. So, to get more relevant
+information, we can trace DMUB commands by enabling the relevant bits in
+`amdgpu_dm_dmub_trace_mask`.
+
+Currently, we support the tracing of the following groups:
+
+Trace Groups
+------------
+
+.. csv-table::
+   :header-rows: 1
+   :widths: 1, 1
+   :file: ./trace-groups-table.csv
+
+**Note: Not all ASICs support all of the listed trace groups**
+
+So, to enable just PSR tracing you can use the following command::
+
+  # echo 0x8020 > /sys/kernel/debug/dri/0/amdgpu_dm_dmub_trace_mask
+
+Then, you need to enable logging trace events to the buffer, which you can do
+using the following::
+
+  # echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
+
+Lastly, after you are able to reproduce the issue you are trying to debug,
+you can disable tracing and read the trace log by using the following::
+
+  # echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
+  # cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+
+So, when reporting bugs related to features such as PSR and ABM, consider
+enabling the relevant bits in the mask before reproducing the issue and
+attach the log that you obtain from the trace buffer in any bug reports that you
+create.
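The mask value is a bitwise OR of the group values from the trace-groups table shipped with this patch: the `0x8020` in the PSR example above combines PSR (`0x20`) with PSR STATE (`0x8000`). As a sketch (not part of the patch itself), adding ABM (`0x200`) to the same capture would be::

  # echo 0x8220 > /sys/kernel/debug/dri/0/amdgpu_dm_dmub_trace_mask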
Documentation/gpu/amdgpu/display/trace-groups-table.csv (new file, 29 lines)
@@ -0,0 +1,29 @@
+Name, Mask Value
+INFO, 0x1
+IRQ SVC, 0x2
+VBIOS, 0x4
+REGISTER, 0x8
+PHY DBG, 0x10
+PSR, 0x20
+AUX, 0x40
+SMU, 0x80
+MALL, 0x100
+ABM, 0x200
+ALPM, 0x400
+TIMER, 0x800
+HW LOCK MGR, 0x1000
+INBOX1, 0x2000
+PHY SEQ, 0x4000
+PSR STATE, 0x8000
+ZSTATE, 0x10000
+TRANSMITTER CTL, 0x20000
+PANEL CNTL, 0x40000
+FAMS, 0x80000
+DPIA, 0x100000
+SUBVP, 0x200000
+INBOX0, 0x400000
+SDP, 0x4000000
+REPLAY, 0x8000000
+REPLAY RESIDENCY, 0x20000000
+CURSOR INFO, 0x80000000
+IPS, 0x100000000
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o \
 	amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
-	amdgpu_ring_mux.o amdgpu_xcp.o
+	amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o

 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -109,6 +109,8 @@
 #include "amdgpu_mca.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xcp.h"
+#include "amdgpu_seq64.h"
+#include "amdgpu_reg_state.h"

 #define MAX_GPU_INSTANCE	64

@@ -468,6 +470,7 @@ struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct amdgpu_bo_va	*prt_va;
 	struct amdgpu_bo_va	*csa_va;
+	struct amdgpu_bo_va	*seq64_va;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
@@ -506,6 +509,31 @@ struct amdgpu_allowed_register_entry {
 	bool grbm_indexed;
 };

+/**
+ * enum amd_reset_method - Methods for resetting AMD GPU devices
+ *
+ * @AMD_RESET_METHOD_NONE: The device will not be reset.
+ * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
+ * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for
+ *                   any device.
+ * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
+ *                   individually. Suitable only for some discrete GPUs, not
+ *                   available for all ASICs.
+ * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
+ *                   are reset depends on the ASIC. Notably doesn't reset IPs
+ *                   shared with the CPU on APUs or the memory controllers (so
+ *                   VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
+ *                  but without powering off the PCI bus. Suitable only for
+ *                  discrete GPUs.
+ * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
+ *                 and does a secondary bus reset or FLR, depending on what the
+ *                 underlying hardware supports.
+ *
+ * Methods available to the AMD GPU driver for resetting the device. Not all
+ * methods are suitable for every device. The user can override the method
+ * using the module parameter `reset_method`.
+ */
 enum amd_reset_method {
 	AMD_RESET_METHOD_NONE = -1,
 	AMD_RESET_METHOD_LEGACY = 0,
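As a sketch of the override mentioned in the kernel-doc above: `reset_method` is an amdgpu module parameter, and `0` maps to `AMD_RESET_METHOD_LEGACY` per the enumerators shown in this hunk (values for the remaining methods are not visible here and would need checking against the full enum), so forcing the legacy path would look like:

	# force the legacy reset method on the next driver load
	modprobe amdgpu reset_method=0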
@@ -585,6 +613,10 @@ struct amdgpu_asic_funcs {
 				     const struct amdgpu_video_codecs **codecs);
 	/* encode "> 32bits" smn addressing */
 	u64 (*encode_ext_smn_addressing)(int ext_id);
+
+	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
+				 enum amdgpu_reg_state reg_state, void *buf,
+				 size_t max_size);
 };

 /*
@@ -986,6 +1018,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;

+	/* for userq and VM fences */
+	struct amdgpu_seq64		seq64;
+
 	/* KFD */
 	struct amdgpu_kfd_dev		kfd;
@@ -547,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
 	struct amdgpu_device *adev = dst, *peer_adev;
 	int num_links;

-	if (adev->asic_type != CHIP_ALDEBARAN)
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
 		return 0;

 	if (src)
@@ -710,35 +710,6 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 	return false;
 }

-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
-				     uint16_t vmid)
-{
-	if (adev->family == AMDGPU_FAMILY_AI) {
-		int i;
-
-		for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
-			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-	} else {
-		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
-	}
-
-	return 0;
-}
-
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-				      uint16_t pasid,
-				      enum TLB_FLUSH_TYPE flush_type,
-				      uint32_t inst)
-{
-	bool all_hub = false;
-
-	if (adev->family == AMDGPU_FAMILY_AI ||
-	    adev->family == AMDGPU_FAMILY_RV)
-		all_hub = true;
-
-	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
-}
-
 bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
 {
 	return adev->have_atomics_support;
@@ -162,11 +162,6 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 				uint32_t *ib_cmd, uint32_t ib_len);
 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
-				uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
-				uint32_t inst);

 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -200,7 +200,7 @@ int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -141,7 +141,7 @@ static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -214,7 +214,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -301,7 +301,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+4)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -238,7 +238,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -324,7 +324,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+4+2+3+7)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -363,7 +363,7 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;

@@ -460,7 +460,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 #undef HQD_N_REGS
 #define HQD_N_REGS (19+6+7+10)

-	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
 	if (*dump == NULL)
 		return -ENOMEM;
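For reference, the repeated kmalloc_array() change above is size-neutral. A sketch of why, assuming the `dump` parameter is declared `uint32_t (**dump)[2]` (the `(*dump)[i++][1]` accesses in the surrounding macros imply each entry is a {register, value} pair):

	/* old request: HQD_N_REGS * 2 * sizeof(uint32_t) bytes
	 * new request: HQD_N_REGS * sizeof(uint32_t[2])  bytes (same total),
	 * with the multiplication now overflow-checked by kmalloc_array() */
	static_assert(sizeof(uint32_t[2]) == 2 * sizeof(uint32_t));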
@@ -1384,7 +1384,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  amdgpu_amdkfd_restore_userptr_worker);

 		*process_info = info;
-		*ef = dma_fence_get(&info->eviction_fence->base);
 	}

 	vm->process_info = *process_info;
@@ -1415,6 +1414,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	list_add_tail(&vm->vm_list_node,
 			&(vm->process_info->vm_list_head));
 	vm->process_info->n_vms++;
+
+	*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
 	mutex_unlock(&vm->process_info->lock);

 	return 0;
@@ -1426,10 +1427,7 @@ validate_pd_fail:
 reserve_pd_fail:
 	vm->process_info = NULL;
 	if (info) {
-		/* Two fence references: one in info and one in *ef */
 		dma_fence_put(&info->eviction_fence->base);
-		dma_fence_put(*ef);
-		*ef = NULL;
 		*process_info = NULL;
 		put_pid(info->pid);
 create_evict_fence_fail:
@@ -1623,7 +1621,8 @@ int amdgpu_amdkfd_criu_resume(void *p)
 		goto out_unlock;
 	}
 	WRITE_ONCE(pinfo->block_mmu_notifications, false);
-	schedule_delayed_work(&pinfo->restore_userptr_work, 0);
+	queue_delayed_work(system_freezable_wq,
+			   &pinfo->restore_userptr_work, 0);

 out_unlock:
 	mutex_unlock(&pinfo->lock);
@@ -2426,7 +2425,8 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
 					     KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
 		if (r)
 			pr_err("Failed to quiesce KFD\n");
-		schedule_delayed_work(&process_info->restore_userptr_work,
+		queue_delayed_work(system_freezable_wq,
+			&process_info->restore_userptr_work,
 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 	}
 	mutex_unlock(&process_info->notifier_lock);
@@ -2749,7 +2749,8 @@ unlock_out:

 	/* If validation failed, reschedule another attempt */
 	if (evicted_bos) {
-		schedule_delayed_work(&process_info->restore_userptr_work,
+		queue_delayed_work(system_freezable_wq,
+			&process_info->restore_userptr_work,
 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));

 		kfd_smi_event_queue_restore_rescheduled(mm);
@@ -2758,6 +2759,23 @@ unlock_out:
 	put_task_struct(usertask);
 }

+static void replace_eviction_fence(struct dma_fence **ef,
+				   struct dma_fence *new_ef)
+{
+	struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
+		/* protected by process_info->lock */);
+
+	/* If we're replacing an unsignaled eviction fence, that fence will
+	 * never be signaled, and if anyone is still waiting on that fence,
+	 * they will hang forever. This should never happen. We should only
+	 * replace the fence in restore_work that only gets scheduled after
+	 * eviction work signaled the fence.
+	 */
+	WARN_ONCE(!dma_fence_is_signaled(old_ef),
+		  "Replacing unsignaled eviction fence");
+	dma_fence_put(old_ef);
+}
+
 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
  *   KFD process identified by process_info
  *
@@ -2781,7 +2799,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	struct amdkfd_process_info *process_info = info;
 	struct amdgpu_vm *peer_vm;
 	struct kgd_mem *mem;
-	struct amdgpu_amdkfd_fence *new_fence;
 	struct list_head duplicate_save;
 	struct amdgpu_sync sync_obj;
 	unsigned long failed_size = 0;
@@ -2825,12 +2842,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	if (ret)
 		goto validate_map_fail;

-	ret = process_sync_pds_resv(process_info, &sync_obj);
-	if (ret) {
-		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
-		goto validate_map_fail;
-	}
-
 	/* Validate BOs and map them to GPUVM (update VM page tables). */
 	list_for_each_entry(mem, &process_info->kfd_bo_list,
 			    validate_list) {
@@ -2881,6 +2892,19 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	if (failed_size)
 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

+	/* Update mappings not managed by KFD */
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			vm_list_node) {
+		struct amdgpu_device *adev = amdgpu_ttm_adev(
+			peer_vm->root.bo->tbo.bdev);
+
+		ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
+		if (ret) {
+			pr_debug("Memory eviction: handle moved failed. Try again\n");
+			goto validate_map_fail;
+		}
+	}
+
 	/* Update page directories */
 	ret = process_update_pds(process_info, &sync_obj);
 	if (ret) {
@@ -2888,25 +2912,47 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 		goto validate_map_fail;
 	}

+	/* Sync with fences on all the page tables. They implicitly depend on any
+	 * move fences from amdgpu_vm_handle_moved above.
+	 */
+	ret = process_sync_pds_resv(process_info, &sync_obj);
+	if (ret) {
+		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+		goto validate_map_fail;
+	}
+
 	/* Wait for validate and PT updates to finish */
 	amdgpu_sync_wait(&sync_obj, false);

-	/* Release old eviction fence and create new one, because fence only
-	 * goes from unsignaled to signaled, fence cannot be reused.
-	 * Use context and mm from the old fence.
+	/* The old eviction fence may be unsignaled if restore happens
+	 * after a GPU reset or suspend/resume. Keep the old fence in that
+	 * case. Otherwise release the old eviction fence and create new
+	 * one, because fence only goes from unsignaled to signaled once
+	 * and cannot be reused. Use context and mm from the old fence.
+	 *
+	 * If an old eviction fence signals after this check, that's OK.
+	 * Anyone signaling an eviction fence must stop the queues first
+	 * and schedule another restore worker.
 	 */
-	new_fence = amdgpu_amdkfd_fence_create(
+	if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
+		struct amdgpu_amdkfd_fence *new_fence =
+			amdgpu_amdkfd_fence_create(
 				process_info->eviction_fence->base.context,
 				process_info->eviction_fence->mm,
 				NULL);
-	if (!new_fence) {
-		pr_err("Failed to create eviction fence\n");
-		ret = -ENOMEM;
-		goto validate_map_fail;
+
+		if (!new_fence) {
+			pr_err("Failed to create eviction fence\n");
+			ret = -ENOMEM;
+			goto validate_map_fail;
+		}
+		dma_fence_put(&process_info->eviction_fence->base);
+		process_info->eviction_fence = new_fence;
+		replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
+	} else {
+		WARN_ONCE(*ef != &process_info->eviction_fence->base,
+			  "KFD eviction fence doesn't match KGD process_info");
 	}
-	dma_fence_put(&process_info->eviction_fence->base);
-	process_info->eviction_fence = new_fence;
-	*ef = dma_fence_get(&new_fence->base);

 	/* Attach new eviction fence to all BOs except pinned ones */
 	list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
@@ -103,7 +103,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 	struct amdgpu_connector_atom_dig *dig_connector;
 	int bpc = 8;
-	unsigned mode_clock, max_tmds_clock;
+	unsigned int mode_clock, max_tmds_clock;

 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
@@ -255,6 +255,7 @@ struct edid *amdgpu_connector_edid(struct drm_connector *connector)
 		return amdgpu_connector->edid;
 	} else if (edid_blob) {
 		struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+
 		if (edid)
 			amdgpu_connector->edid = edid;
 	}
@@ -581,6 +582,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
 		amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
 	} else {
 		const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
 		amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
 	}

@@ -797,6 +799,7 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
 		amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
 	else {
 		const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
 		amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
 	}

@@ -979,6 +982,41 @@ amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
 	return false;
 }

+static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
+					struct drm_connector *connector,
+					struct amdgpu_connector *amdgpu_connector)
+{
+	struct drm_connector *list_connector;
+	struct drm_connector_list_iter iter;
+	struct amdgpu_connector *list_amdgpu_connector;
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+
+	if (amdgpu_connector->shared_ddc && *status == connector_status_connected) {
+		drm_connector_list_iter_begin(dev, &iter);
+		drm_for_each_connector_iter(list_connector,
+					    &iter) {
+			if (connector == list_connector)
+				continue;
+			list_amdgpu_connector = to_amdgpu_connector(list_connector);
+			if (list_amdgpu_connector->shared_ddc &&
+			    list_amdgpu_connector->ddc_bus->rec.i2c_id ==
+			     amdgpu_connector->ddc_bus->rec.i2c_id) {
+				/* cases where both connectors are digital */
+				if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+					/* hpd is our only option in this case */
+					if (!amdgpu_display_hpd_sense(adev,
+								      amdgpu_connector->hpd.hpd)) {
+						amdgpu_connector_free_edid(connector);
+						*status = connector_status_disconnected;
+					}
+				}
+			}
+		}
+		drm_connector_list_iter_end(&iter);
+	}
+}
+
 /*
  * DVI is complicated
  * Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -1065,32 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 	 * DDC line. The latter is more complex because with DVI<->HDMI adapters
 	 * you don't really know what's connected to which port as both are digital.
 	 */
-	if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
-		struct drm_connector *list_connector;
-		struct drm_connector_list_iter iter;
-		struct amdgpu_connector *list_amdgpu_connector;
-
-		drm_connector_list_iter_begin(dev, &iter);
-		drm_for_each_connector_iter(list_connector,
-					    &iter) {
-			if (connector == list_connector)
-				continue;
-			list_amdgpu_connector = to_amdgpu_connector(list_connector);
-			if (list_amdgpu_connector->shared_ddc &&
-			    (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
-			     amdgpu_connector->ddc_bus->rec.i2c_id)) {
-				/* cases where both connectors are digital */
-				if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
-					/* hpd is our only option in this case */
-					if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-						amdgpu_connector_free_edid(connector);
-						ret = connector_status_disconnected;
-					}
-				}
-			}
-		}
-		drm_connector_list_iter_end(&iter);
-	}
+			amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
 		}
 	}

@@ -1192,6 +1205,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
 static void amdgpu_connector_dvi_force(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
 	if (connector->force == DRM_FORCE_ON)
 		amdgpu_connector->use_digital = false;
 	if (connector->force == DRM_FORCE_ON_DIGITAL)
@@ -1426,6 +1440,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 			ret = connector_status_connected;
 		else if (amdgpu_connector->dac_load_detect) { /* try load detection */
 			const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
 			ret = encoder_funcs->detect(encoder, connector);
 		}
 	}
@@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

+	if (!adev->didt_rreg)
+		return -EOPNOTSUPP;
+
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

+	if (!adev->didt_wreg)
+		return -EOPNOTSUPP;
+
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -162,6 +162,65 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 static DEVICE_ATTR(pcie_replay_count, 0444,
 		amdgpu_device_get_pcie_replay_count, NULL);

+static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
+					  struct bin_attribute *attr, char *buf,
+					  loff_t ppos, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	ssize_t bytes_read;
+
+	switch (ppos) {
+	case AMDGPU_SYS_REG_STATE_XGMI:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_WAFL:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_PCIE:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_USR:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
+		break;
+	case AMDGPU_SYS_REG_STATE_USR_1:
+		bytes_read = amdgpu_asic_get_reg_state(
+			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return bytes_read;
+}
+
+BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+	 AMDGPU_SYS_REG_STATE_END);
+
+int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
+{
+	int ret;
+
+	if (!amdgpu_asic_get_reg_state_supported(adev))
+		return 0;
+
+	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+
+	return ret;
+}
+
+void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
+{
+	if (!amdgpu_asic_get_reg_state_supported(adev))
+		return;
+	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+}
+
 /**
  * DOC: board_info
  *
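From user space, the dump type is selected by the read offset into the new `reg_state` binary attribute, which is created in the device's sysfs directory (typically /sys/bus/pci/devices/&lt;bdf&gt;/). A sketch of reading it; the AMDGPU_SYS_REG_STATE_* offset values come from amdgpu_reg_state.h and are not visible in this diff, so the skip value below is a placeholder and the PCI address is illustrative:

	# hypothetical: dump the XGMI register state for one GPU
	dd if=/sys/bus/pci/devices/0000:03:00.0/reg_state \
	   bs=1 skip=$XGMI_OFFSET count=4096 | xxd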
@@ -1551,11 +1610,15 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
  */
-static bool amdgpu_device_pcie_dynamic_switching_supported(void)
+static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
 {
 #if IS_ENABLED(CONFIG_X86)
 	struct cpuinfo_x86 *c = &cpu_data(0);

+	/* eGPUs change speeds based on USB4 fabric conditions */
+	if (dev_is_removable(adev->dev))
+		return true;
+
 	if (c->x86_vendor == X86_VENDOR_INTEL)
 		return false;
 #endif
@@ -2395,7 +2458,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
-	if (!amdgpu_device_pcie_dynamic_switching_supported())
+	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;

 	total = true;
@@ -2676,6 +2739,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 				goto init_failed;
 			}
 		}

+		r = amdgpu_seq64_init(adev);
+		if (r) {
+			DRM_ERROR("allocate seq64 failed %d\n", r);
+			goto init_failed;
+		}
 	}
 }

@@ -3138,6 +3207,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_mem_scratch_fini(adev);
 			amdgpu_ib_pool_fini(adev);
+			amdgpu_seq64_fini(adev);
 		}

 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -4222,6 +4292,7 @@ fence_driver_init:
 			"Could not create amdgpu board attributes\n");

 	amdgpu_fru_sysfs_init(adev);
+	amdgpu_reg_state_sysfs_init(adev);

 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		r = amdgpu_pmu_init(adev);
@@ -4344,6 +4415,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	amdgpu_fru_sysfs_fini(adev);

+	amdgpu_reg_state_sysfs_fini(adev);
+
 	/* disable ras feature must before hw fini */
 	amdgpu_ras_pre_fini(adev);

@@ -4538,6 +4611,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_release_full_gpu(adev, false);

+	r = amdgpu_dpm_notify_rlc_state(adev, false);
+	if (r)
+		return r;
+
 	return 0;
 }

@@ -5730,6 +5807,39 @@ recover_end:
 	return r;
 }

+/**
+ * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * first physical partner to an AMD dGPU.
+ * This will exclude any virtual switches and links.
+ */
+static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
+					    enum pci_bus_speed *speed,
+					    enum pcie_link_width *width)
+{
+	struct pci_dev *parent = adev->pdev;
+
+	if (!speed || !width)
+		return;
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while ((parent = pci_upstream_bridge(parent))) {
+		/* skip upstream/downstream switches internal to dGPU */
+		if (parent->vendor == PCI_VENDOR_ID_ATI)
+			continue;
+		*speed = pcie_get_speed_cap(parent);
+		*width = pcie_get_width_cap(parent);
+		break;
+	}
+}
+
 /**
  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
  *
@@ -5763,8 +5873,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
 		return;

-	pcie_bandwidth_available(adev->pdev, NULL,
-				 &platform_speed_cap, &platform_link_width);
+	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
+					&platform_link_width);

 	if (adev->pm.pcie_gen_mask == 0) {
 		/* asic caps */
@@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
 			adev->have_disp_power_ref = true;
 			return ret;
 		}
-	/* if we have no active crtcs, then drop the power ref
-	 * we got before
+	/* if we have no active crtcs, then go to
+	 * drop the power ref we got before
 	 */
-	if (!active && adev->have_disp_power_ref) {
-		pm_runtime_put_autosuspend(dev->dev);
+	if (!active && adev->have_disp_power_ref)
 		adev->have_disp_power_ref = false;
-	}

 out:
 	/* drop the power reference we got coming in here */
 	pm_runtime_put_autosuspend(dev->dev);
@@ -42,6 +42,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_trace.h"

 /**
  * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -63,6 +64,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 		attach->peer2peer = false;

 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(1, __func__);
 	if (r < 0)
 		goto out;

@@ -70,6 +72,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,

 out:
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(0, __func__);
 	return r;
 }

@@ -90,6 +93,7 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,

 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(0, __func__);
 }

 /**
@@ -2263,6 +2263,8 @@ retry_init:
 		pm_runtime_mark_last_busy(ddev->dev);
 		pm_runtime_put_autosuspend(ddev->dev);

+		pci_wake_from_d3(pdev, TRUE);
+
 		/*
 		 * For runpm implemented via BACO, PMFW will handle the
 		 * timing for BACO in and out:
@@ -183,6 +183,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
+	trace_amdgpu_runpm_reference_dumps(1, __func__);
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
 		struct dma_fence *old;
@@ -310,6 +311,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 		dma_fence_put(fence);
 		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		trace_amdgpu_runpm_reference_dumps(0, __func__);
 	} while (last_seq != seq);

 	return true;
@@ -181,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

+	if (!bo->ttm)
+		return AMDGPU_BO_INVALID_OFFSET;
+
 	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
@@ -1428,6 +1428,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		fpriv->csa_va = NULL;
 	}

+	amdgpu_seq64_unmap(adev, fpriv);
+
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
 	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
@@ -377,7 +377,7 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
 	int ret;

-	ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false);
+	ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
 	if (ret)
 		return ret;

@@ -485,7 +485,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_se
 void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
 {
 #if defined(CONFIG_DEBUG_FS)
-	if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+	if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
 		return;

 	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
@@ -1527,10 +1527,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	uint64_t offset;
+	uint64_t offset = AMDGPU_BO_INVALID_OFFSET;

-	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
-		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
+	if (bo->tbo.resource->mem_type == TTM_PL_TT)
+		offset = amdgpu_gmc_agp_addr(&bo->tbo);
+
+	if (offset == AMDGPU_BO_INVALID_OFFSET)
+		offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+			 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

 	return amdgpu_gmc_sign_extend(offset);
 }
@@ -3132,6 +3132,9 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return 0;

+	/* enable MCA debug on APU device */
+	amdgpu_ras_set_mca_debug_mode(adev, !!(adev->flags & AMD_IS_APU));
+
 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
 		if (!node->ras_obj) {
 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -3405,12 +3408,18 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
 	return 0;
 }

-void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	int ret = 0;

-	if (con)
-		con->is_mca_debug_mode = enable;
+	if (con) {
+		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+		if (!ret)
+			con->is_mca_debug_mode = enable;
+	}
+
+	return ret;
 }

 bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
@@ -773,7 +773,7 @@ struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev);

 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);

-void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
 bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
 				     unsigned int *mode);
@@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
 		control->i2c_address = EEPROM_I2C_MADDR_0;
 		return true;
+	case IP_VERSION(13, 0, 0):
+		if (strnstr(atom_ctx->vbios_pn, "D707",
+			    sizeof(atom_ctx->vbios_pn)))
+			control->i2c_address = EEPROM_I2C_MADDR_0;
+		else
+			control->i2c_address = EEPROM_I2C_MADDR_4;
+		return true;
 	case IP_VERSION(13, 0, 6):
 	case IP_VERSION(13, 0, 10):
 		control->i2c_address = EEPROM_I2C_MADDR_4;
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c (new file, 247 lines)
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_seq64.h"
+
+#include <drm/drm_exec.h>
+
+/**
+ * DOC: amdgpu_seq64
+ *
+ * amdgpu_seq64 allocates 64-bit memory slots, one per request, in sequence
+ * order. The seq64 driver is required for user-queue fence memory
+ * allocation, TLB counters and VM updates. It has a maximum of 32768
+ * 64-bit slots.
+ */
+
+/**
+ * amdgpu_seq64_map - Map the seq64 memory to VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: vm pointer
+ * @bo_va: bo_va pointer
+ * @seq64_addr: seq64 vaddr start address
+ * @size: seq64 pool size
+ *
+ * Map the seq64 memory to the given VM.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct amdgpu_bo_va **bo_va, u64 seq64_addr,
+		     uint32_t size)
+{
+	struct amdgpu_bo *bo;
+	struct drm_exec exec;
+	int r;
+
+	bo = adev->seq64.sbo;
+	if (!bo)
+		return -EINVAL;
+
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error;
+	}
+
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+	if (!*bo_va) {
+		r = -ENOMEM;
+		goto error;
+	}
+
+	r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
+			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+			     AMDGPU_PTE_EXECUTABLE);
+	if (r) {
+		DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
+		amdgpu_vm_bo_del(adev, *bo_va);
+		goto error;
+	}
+
+	r = amdgpu_vm_bo_update(adev, *bo_va, false);
+	if (r) {
+		DRM_ERROR("failed to do vm_bo_update on userq sem\n");
+		amdgpu_vm_bo_del(adev, *bo_va);
+		goto error;
+	}
+
+error:
+	drm_exec_fini(&exec);
+	return r;
+}
+
+/**
+ * amdgpu_seq64_unmap - Unmap the seq64 memory
+ *
+ * @adev: amdgpu_device pointer
+ * @fpriv: DRM file private
+ *
+ * Unmap the seq64 memory from the given VM.
+ */
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
+{
+	struct amdgpu_vm *vm;
+	struct amdgpu_bo *bo;
+	struct drm_exec exec;
+	int r;
+
+	if (!fpriv->seq64_va)
+		return;
+
+	bo = adev->seq64.sbo;
+	if (!bo)
+		return;
+
+	vm = &fpriv->vm;
+
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error;
+	}
+
+	amdgpu_vm_bo_del(adev, fpriv->seq64_va);
+
+	fpriv->seq64_va = NULL;
+
+error:
+	drm_exec_fini(&exec);
+}
+
+/**
+ * amdgpu_seq64_alloc - Allocate a 64-bit slot
+ *
+ * @adev: amdgpu_device pointer
+ * @gpu_addr: allocated gpu VA start address
+ * @cpu_addr: allocated cpu VA start address
+ *
+ * Allocate a 64-bit slot from the seq64 pool.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
+		       u64 **cpu_addr)
+{
+	unsigned long bit_pos;
+	u32 offset;
+
+	bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+
+	if (bit_pos < adev->seq64.num_sem) {
+		__set_bit(bit_pos, adev->seq64.used);
+		offset = bit_pos << 6; /* convert to qw offset */
+	} else {
+		return -EINVAL;
+	}
+
+	*gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
+	*cpu_addr = offset + adev->seq64.cpu_base_addr;
+
+	return 0;
+}
+
+/**
+ * amdgpu_seq64_free - Free the given 64-bit slot
+ *
+ * @adev: amdgpu_device pointer
+ * @gpu_addr: gpu start address to be freed
+ *
+ * Free the given 64-bit slot back to the seq64 pool.
+ */
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
+{
+	u32 offset;
+
+	offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;
+
+	offset >>= 6;
+	if (offset < adev->seq64.num_sem)
+		__clear_bit(offset, adev->seq64.used);
+}
+
+/**
+ * amdgpu_seq64_fini - Cleanup seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the memory space allocated for seq64.
+ */
+void amdgpu_seq64_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_free_kernel(&adev->seq64.sbo,
+			      NULL,
+			      (void **)&adev->seq64.cpu_base_addr);
+}
+
+/**
+ * amdgpu_seq64_init - Initialize seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the required memory space for seq64.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->seq64.sbo)
+		return 0;
+
+	/*
+	 * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS
+	 * 64bit slots
+	 */
+	r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+				    &adev->seq64.sbo, NULL,
+				    (void **)&adev->seq64.cpu_base_addr);
+	if (r) {
+		dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
+		return r;
+	}
+
+	memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);
+
+	adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
+	memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
+
+	return 0;
+}
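A minimal usage sketch for the allocator, based only on the signatures above (the calling context is assumed and error handling is trimmed):

	/* allocate one 64-bit slot, seed it from the CPU, then release it */
	static int seq64_smoke_test(struct amdgpu_device *adev)
	{
		u64 gpu_addr;	/* GPU VA of the slot within the reserved range */
		u64 *cpu_addr;	/* CPU mapping of the same slot */
		int r;

		r = amdgpu_seq64_alloc(adev, &gpu_addr, &cpu_addr);
		if (r)
			return r;

		*cpu_addr = 1;	/* e.g. an initial fence sequence number */

		amdgpu_seq64_free(adev, gpu_addr);
		return 0;
	}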
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h (new file, 49 lines)
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SEQ64_H__
+#define __AMDGPU_SEQ64_H__
+
+#define AMDGPU_SEQ64_SIZE		(2ULL << 20)
+#define AMDGPU_MAX_SEQ64_SLOTS		(AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8))
+#define AMDGPU_SEQ64_VADDR_OFFSET	0x50000
+#define AMDGPU_SEQ64_VADDR_START	(AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET)
+
+struct amdgpu_seq64 {
+	struct amdgpu_bo *sbo;
+	u32 num_sem;
+	u64 *cpu_base_addr;
+	DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
+};
+
+void amdgpu_seq64_fini(struct amdgpu_device *adev);
+int amdgpu_seq64_init(struct amdgpu_device *adev);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size);
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
+
+#endif
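A worked check of the sizing constants (the interpretation, not the arithmetic, is an assumption): AMDGPU_SEQ64_SIZE is 2 MiB, and the `sizeof(u64) * 8` divisor gives each slot an effective 64-byte stride, matching the `bit_pos << 6` conversion in amdgpu_seq64_alloc(); 2097152 / 64 = 32768 slots, the figure quoted in the DOC comment.

	static_assert(AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8) == 32768);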
@@ -554,6 +554,21 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
 		      __entry->value)
 );

+TRACE_EVENT(amdgpu_runpm_reference_dumps,
+	    TP_PROTO(uint32_t index, const char *func),
+	    TP_ARGS(index, func),
+	    TP_STRUCT__entry(
+			     __field(uint32_t, index)
+			     __string(func, func)
+			     ),
+	    TP_fast_assign(
+			   __entry->index = index;
+			   __assign_str(func, func);
+			   ),
+	    TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
+		      __entry->index,
+		      __get_str(func))
+);
 #undef AMDGPU_JOB_GET_TIMELINE_NAME
 #endif
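Once the event is compiled in, it can be watched through the standard tracefs interface; a sketch, assuming the default tracefs mount point:

	# echo 1 > /sys/kernel/tracing/events/amdgpu/amdgpu_runpm_reference_dumps/enable
	# cat /sys/kernel/tracing/trace_pipe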
@@ -959,10 +959,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 		return 0;

 	addr = amdgpu_gmc_agp_addr(bo);
-	if (addr != AMDGPU_BO_INVALID_OFFSET) {
-		bo->resource->start = addr >> PAGE_SHIFT;
+	if (addr != AMDGPU_BO_INVALID_OFFSET)
 		return 0;
-	}

 	/* allocate GART space */
 	placement.num_placement = 1;
@@ -1437,6 +1437,51 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	return 0;
 }

+/**
+ * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @flush_type: flush type
+ * @xcc_mask: mask of XCCs that belong to the compute partition in need of a
+ *            TLB flush.
+ *
+ * Flush TLB if needed for a compute VM.
+ *
+ * Returns:
+ * 0 for success.
+ */
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm,
+				uint32_t flush_type,
+				uint32_t xcc_mask)
+{
+	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
+	bool all_hub = false;
+	int xcc = 0, r = 0;
+
+	WARN_ON_ONCE(!vm->is_compute_context);
+
+	/*
+	 * It can be that we race and lose here, but that is extremely unlikely
+	 * and the worst thing which could happen is that we flush the changes
+	 * into the TLB once more which is harmless.
+	 */
+	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
+		return 0;
+
+	if (adev->family == AMDGPU_FAMILY_AI ||
+	    adev->family == AMDGPU_FAMILY_RV)
+		all_hub = true;
+
+	for_each_inst(xcc, xcc_mask) {
+		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
+						   all_hub, xcc);
+		if (r)
+			break;
+	}
+	return r;
+}
+
 /**
  * amdgpu_vm_bo_add - add a bo to a specific vm
  *
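A sketch of how the KFD side might invoke the new helper after a page-table update; the flush type value and the source of the XCC mask are assumptions, not shown in this diff:

	/* flush the compute VM's TLB on every XCC of the partition;
	 * 0 is assumed here to be the lightweight/legacy flush type */
	r = amdgpu_vm_flush_compute_tlb(adev, vm, 0, xcc_mask);
	if (r)
		dev_err(adev->dev, "TLB flush failed (%d)\n", r);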
@@ -324,6 +324,7 @@ struct amdgpu_vm {
 	/* Last finished delayed update */
 	atomic64_t		tlb_seq;
 	struct dma_fence	*last_tlb_flush;
+	atomic64_t		kfd_last_flushed_seq;

 	/* How many times we had to re-generate the page tables */
 	uint64_t		generation;
@@ -445,6 +446,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm,
 			   struct ww_acquire_ctx *ticket);
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm,
+				uint32_t flush_type,
+				uint32_t xcc_mask);
 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -413,6 +413,38 @@ static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
 	return sysfs_emit(buf, "%s\n", buf);
 }

+static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev,
+						   struct device_attribute *attr,
+						   char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+	int i, j, size = 0;
+	int current_node;
+	/*
+	 * get the node id in the sysfs for the current socket and show
+	 * it in the port num info output in the sysfs for easy reading.
+	 * it is NOT the one retrieved from xgmi ta.
+	 */
+	for (i = 0; i < top->num_nodes; i++) {
+		if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+			current_node = i;
+			break;
+		}
+	}
+
+	for (i = 0; i < top->num_nodes; i++) {
+		for (j = 0; j < top->nodes[i].num_links; j++)
+			/* node id in sysfs starts from 1 rather than 0 so +1 here */
+			size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1,
+					      top->nodes[i].port_num[j].src_xgmi_port_num, i + 1,
+					      top->nodes[i].port_num[j].dst_xgmi_port_num);
+	}
+
+	return size;
+}
|
||||
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
@ -452,6 +484,7 @@ static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
|
||||
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
|
||||
static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
|
||||
static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
|
||||
static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL);
|
||||
|
||||
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
|
||||
struct amdgpu_hive_info *hive)
|
||||
@ -487,6 +520,13 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
|
||||
if (ret)
|
||||
pr_err("failed to create xgmi_num_links\n");
|
||||
|
||||
/* Create xgmi port num file if supported */
|
||||
if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
|
||||
ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num);
|
||||
if (ret)
|
||||
dev_err(adev->dev, "failed to create xgmi_port_num\n");
|
||||
}
|
||||
|
||||
/* Create sysfs link to hive info folder on the first device */
|
||||
if (hive->kobj.parent != (&adev->dev->kobj)) {
|
||||
ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
|
||||
@ -517,6 +557,8 @@ remove_file:
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_error);
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
|
||||
if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
|
||||
|
||||
success:
|
||||
return ret;
|
||||
@ -533,6 +575,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_error);
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
|
||||
if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
|
||||
|
||||
if (hive->kobj.parent != (&adev->dev->kobj))
|
||||
sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
|
||||
|
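A hypothetical userspace probe for the new attribute; the sysfs path and card index below are illustrative, not guaranteed by the patch. Each line of output is produced by the handler above as "%02x:%02x -> %02x:%02x", i.e. source node:port -> destination node:port with 1-based node ids::

    #include <stdio.h>

    int main(void)
    {
            char line[64];
            FILE *f = fopen("/sys/class/drm/card0/device/xgmi_port_num", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    printf("link: %s", line);   /* e.g. "01:02 -> 02:03" */
            fclose(f);
            return 0;
    }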
@@ -24,6 +24,7 @@
 #include "soc15.h"

 #include "soc15_common.h"
+#include "amdgpu_reg_state.h"
 #include "amdgpu_xcp.h"
 #include "gfx_v9_4_3.h"
 #include "gfxhub_v1_2.h"
@@ -656,3 +657,213 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)

 	return 0;
 }
+
+static void aqua_read_smn(struct amdgpu_device *adev,
+			  struct amdgpu_smn_reg_data *regdata,
+			  uint64_t smn_addr)
+{
+	regdata->addr = smn_addr;
+	regdata->value = RREG32_PCIE(smn_addr);
+}
+
+struct aqua_reg_list {
+	uint64_t start_addr;
+	uint32_t num_regs;
+	uint32_t incrx;
+};
+
+#define DW_ADDR_INCR	4
+
+static void aqua_read_smn_ext(struct amdgpu_device *adev,
+			      struct amdgpu_smn_reg_data *regdata,
+			      uint64_t smn_addr, int i)
+{
+	regdata->addr =
+		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
+	regdata->value = RREG32_PCIE_EXT(regdata->addr);
+}
+
+#define smnreg_0x1A340218	0x1A340218
+#define smnreg_0x1A3402E4	0x1A3402E4
+#define smnreg_0x1A340294	0x1A340294
+#define smreg_0x1A380088	0x1A380088
+
+#define NUM_PCIE_SMN_REGS	14
+
+static struct aqua_reg_list pcie_reg_addrs[] = {
+	{ smnreg_0x1A340218, 1, 0 },
+	{ smnreg_0x1A3402E4, 1, 0 },
+	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
+	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
+					     void *buf, size_t max_size)
+{
+	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
+	uint32_t start_addr, incrx, num_regs, szbuf;
+	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	struct pci_dev *us_pdev, *ds_pdev;
+	int aer_cap, r, n;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;
+
+	szbuf = sizeof(*pcie_reg_state) +
+		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
+	/* Only one instance of pcie regs */
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
+						     sizeof(*pcie_reg_state));
+	pcie_regs->inst_header.instance = 0;
+	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
+	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;
+
+	reg_data = pcie_regs->smn_reg_values;
+
+	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
+		start_addr = pcie_reg_addrs[r].start_addr;
+		incrx = pcie_reg_addrs[r].incrx;
+		num_regs = pcie_reg_addrs[r].num_regs;
+		for (n = 0; n < num_regs; n++) {
+			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
+			++reg_data;
+		}
+	}
+
+	ds_pdev = pci_upstream_bridge(adev->pdev);
+	us_pdev = pci_upstream_bridge(ds_pdev);
+
+	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
+				  &pcie_regs->device_status);
+	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
+				  &pcie_regs->link_status);
+
+	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
+	if (aer_cap) {
+		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
+				      &pcie_regs->pcie_corr_err_status);
+		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
+				      &pcie_regs->pcie_uncorr_err_status);
+	}
+
+	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
+			      &pcie_regs->sub_bus_number_latency);
+
+	pcie_reg_state->common_header.structure_size = szbuf;
+	pcie_reg_state->common_header.format_revision = 1;
+	pcie_reg_state->common_header.content_revision = 0;
+	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
+	pcie_reg_state->common_header.num_instances = 1;

+	return pcie_reg_state->common_header.structure_size;
+}
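A back-of-envelope check of the -EOVERFLOW guard above, assuming amdgpu_reginst_size(num_inst, inst_size, num_regs) accounts for one instance header plus num_regs address/value records per instance; the struct sizes used here are stand-ins, not the real ABI from amdgpu_reg_state.h::

    #include <stdio.h>
    #include <stddef.h>

    #define NUM_PCIE_SMN_REGS 14

    static size_t reginst_size(size_t num_inst, size_t inst_size,
                               size_t num_regs, size_t reg_data_size)
    {
            return num_inst * (inst_size + num_regs * reg_data_size);
    }

    int main(void)
    {
            size_t hdr = 16, inst = 32, reg = 12;   /* hypothetical sizes */
            size_t need = hdr + reginst_size(1, inst, NUM_PCIE_SMN_REGS, reg);

            printf("caller must pass max_size >= %zu bytes\n", need);
            return 0;
    }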
+
+#define smnreg_0x11A00050	0x11A00050
+#define smnreg_0x11A00180	0x11A00180
+#define smnreg_0x11A00070	0x11A00070
+#define smnreg_0x11A00200	0x11A00200
+#define smnreg_0x11A0020C	0x11A0020C
+#define smnreg_0x11A00210	0x11A00210
+#define smnreg_0x11A00108	0x11A00108
+
+#define XGMI_LINK_REG(smnreg, l)	((smnreg) | (l << 20))
+
+#define NUM_XGMI_SMN_REGS	25
+
+static struct aqua_reg_list xgmi_reg_addrs[] = {
+	{ smnreg_0x11A00050, 1, 0 },
+	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
+	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
+	{ smnreg_0x11A00200, 1, 0 },
+	{ smnreg_0x11A0020C, 1, 0 },
+	{ smnreg_0x11A00210, 1, 0 },
+	{ smnreg_0x11A00108, 1, 0 },
+};
+
+static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
+					     void *buf, size_t max_size)
+{
+	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
+	uint32_t start_addr, incrx, num_regs, szbuf;
+	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
+	struct amdgpu_smn_reg_data *reg_data;
+	const int max_xgmi_instances = 8;
+	int inst = 0, i, j, r, n;
+	const int xgmi_inst = 2;
+	void *p;
+
+	if (!buf || !max_size)
+		return -EINVAL;
+
+	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
+
+	szbuf = sizeof(*xgmi_reg_state) +
+		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
+				    NUM_XGMI_SMN_REGS);
+	/* Only one instance of pcie regs */
+	if (max_size < szbuf)
+		return -EOVERFLOW;
+
+	p = &xgmi_reg_state->xgmi_state_regs[0];
+	for_each_inst(i, adev->aid_mask) {
+		for (j = 0; j < xgmi_inst; ++j) {
+			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
+			xgmi_regs->inst_header.instance = inst++;
+
+			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
+			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
+
+			reg_data = xgmi_regs->smn_reg_values;
+
+			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
+				start_addr = xgmi_reg_addrs[r].start_addr;
+				incrx = xgmi_reg_addrs[r].incrx;
+				num_regs = xgmi_reg_addrs[r].num_regs;
+
+				for (n = 0; n < num_regs; n++) {
+					aqua_read_smn_ext(
+						adev, reg_data,
+						XGMI_LINK_REG(start_addr, j) +
+							n * incrx,
+						i);
+					++reg_data;
+				}
+			}
+			p = reg_data;
+		}
+	}
+
+	xgmi_reg_state->common_header.structure_size = szbuf;
+	xgmi_reg_state->common_header.format_revision = 1;
+	xgmi_reg_state->common_header.content_revision = 0;
+	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
+	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
+
+	return xgmi_reg_state->common_header.structure_size;
+}
+
+ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
+				    enum amdgpu_reg_state reg_state, void *buf,
+				    size_t max_size)
+{
+	ssize_t size;
+
+	switch (reg_state) {
+	case AMDGPU_REG_STATE_TYPE_PCIE:
+		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
+		break;
+	case AMDGPU_REG_STATE_TYPE_XGMI:
+		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return size;
+}
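A worked example of the address encoding used above, with the values computed by hand (purely illustrative): link 1 of register block 0x11A00180 lands at 0x11A00180 | (1 << 20) = 0x11B00180, and the fourth register of a DW_ADDR_INCR run sits 3 * 4 bytes further along::

    #include <stdint.h>
    #include <assert.h>

    #define DW_ADDR_INCR 4
    #define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

    int main(void)
    {
            uint64_t addr = XGMI_LINK_REG(0x11A00180, 1) + 3 * DW_ADDR_INCR;

            assert(addr == 0x11B0018C);
            return 0;
    }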
@@ -67,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
@@ -89,6 +90,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");

+static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
@@ -289,6 +294,9 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)

 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(11, 0, 1):
 	case IP_VERSION(11, 0, 4):
@@ -304,6 +312,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
 	default:
 		break;
 	}
+	soc15_program_register_sequence(adev,
+					golden_settings_gc_11_0,
+					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
+
 }

 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@@ -419,7 +431,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	cpu_ptr = &adev->wb.wb[index];

-	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
 		goto err1;
@@ -556,7 +568,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
 	}

 	if (!amdgpu_sriov_vf(adev)) {
-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
+		    adev->pdev->revision == 0xCE)
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin");
+		else
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
 		if (err)
 			goto out;
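A minimal sketch of how a golden (and_mask, or_value) pair is usually applied, under the standard read-modify-write assumption; the real logic lives in soc15_program_register_sequence() and may special-case full-width masks, so treat this as illustrative only::

    #include <stdint.h>

    /* Clear the masked field, then fold in the golden value. */
    static uint32_t apply_golden(uint32_t current, uint32_t and_mask,
                                 uint32_t or_value)
    {
            return (current & ~and_mask) | (or_value & and_mask);
    }

For the regTCP_CNTL entry above this simply forces bit 29 (0x20000000) on while leaving every other bit of the register untouched.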
@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 16,
-					AMDGPU_IB_POOL_DIRECT, &ib);
+
+	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r)
 		goto err1;

@@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 16,
-					AMDGPU_IB_POOL_DIRECT, &ib);
+
+	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r)
 		goto err1;

@@ -297,8 +297,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 16,
-					AMDGPU_IB_POOL_DIRECT, &ib);
+
+	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
 	if (r)
 		goto err1;
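All three hunks grow the same indirect-buffer smoke test from 16 to 20 dwords. An illustrative outline of the shared test pattern, reduced to plain C (not a compilable kernel excerpt; submission and fence-wait steps are elided in comments)::

    #include <stdint.h>
    #include <stdbool.h>

    static bool ib_test_passed(volatile uint32_t *wb_slot, uint32_t expected)
    {
            *wb_slot = 0xCAFEDEAD;          /* poison the slot before submission */
            /* ... build an IB that writes 'expected' to the writeback slot ... */
            /* ... submit it and wait on the submission fence with a timeout ... */
            return *wb_slot == expected;    /* did the GPU land the write? */
    }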
@@ -3882,150 +3882,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
 	mutex_unlock(&adev->grbm_idx_mutex);
 }

-static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
-						 int xcc_id)
-{
-	uint32_t data;
-
-	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS);
-	if (data) {
-		dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
-		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
-	}
-
-	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS);
-	if (data) {
-		dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
-		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
-	}
-
-	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
-			    regVML2_WALKER_MEM_ECC_STATUS);
-	if (data) {
-		dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
-		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS,
-			     0x3);
-	}
-}
-
-static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev,
-					     uint32_t status, int xcc_id)
-{
-	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
-	uint32_t i, simd, wave;
-	uint32_t wave_status;
-	uint32_t wave_pc_lo, wave_pc_hi;
-	uint32_t wave_exec_lo, wave_exec_hi;
-	uint32_t wave_inst_dw0, wave_inst_dw1;
-	uint32_t wave_ib_sts;
-
-	for (i = 0; i < 32; i++) {
-		if (!((i << 1) & status))
-			continue;
-
-		simd = i / cu_info->max_waves_per_simd;
-		wave = i % cu_info->max_waves_per_simd;
-
-		wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
-		wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
-		wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
-		wave_exec_lo =
-			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
-		wave_exec_hi =
-			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
-		wave_inst_dw0 =
-			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
-		wave_inst_dw1 =
-			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
-		wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
-
-		dev_info(
-			adev->dev,
-			"\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
-			simd, wave, wave_status,
-			((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
-			((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
-			((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
-			wave_ib_sts);
-	}
-}
-
-static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
-						    int xcc_id)
-{
-	uint32_t se_idx, sh_idx, cu_idx;
-	uint32_t status;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
-		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
-			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
-				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
-							    cu_idx, xcc_id);
-				status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
-						      regSQ_TIMEOUT_STATUS);
-				if (status != 0) {
-					dev_info(
-						adev->dev,
-						"GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
-						se_idx, sh_idx, cu_idx);
-					gfx_v9_4_3_log_cu_timeout_status(
-						adev, status, xcc_id);
-				}
-				/* clear old status */
-				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
-					     regSQ_TIMEOUT_STATUS, 0);
-			}
-		}
-	}
-	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
-				    xcc_id);
-	mutex_unlock(&adev->grbm_idx_mutex);
-}
-
-static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
-						 void *ras_error_status, int xcc_id)
-{
-	gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
-	gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
-}
-
-static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
-						 int xcc_id)
-{
-	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
-	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
-	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
-}
-
-static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
-						    int xcc_id)
-{
-	uint32_t se_idx, sh_idx, cu_idx;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
-		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
-			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
-				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
-							    cu_idx, xcc_id);
-				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
-					     regSQ_TIMEOUT_STATUS, 0);
-			}
-		}
-	}
-	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
-				    xcc_id);
-	mutex_unlock(&adev->grbm_idx_mutex);
-}
-
-static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
-						 void *ras_error_status, int xcc_id)
-{
-	gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
-	gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
-}
-
 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
 						  void *ras_error_status, int xcc_id)
 {
@@ -4067,16 +3923,6 @@ static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
 }

-static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev)
-{
-	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status);
-}
-
-static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
-{
-	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
-}
-
 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
 {
 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
@@ -4394,8 +4240,6 @@ struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
 struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
-	.query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
-	.reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
 };

 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
@@ -259,17 +259,17 @@ const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {

 static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
 {
-/*	uint32_t def, data;
-
-	def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
-	data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
-			CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
-	data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
-			CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
-
-	if (def != data)
-		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
-*/
+	uint32_t def, data;
+
+	def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
+	data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
+			     CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
+	data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
+			     CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
+
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
 }

 static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
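The hunk above enables a previously commented-out register init, which follows the driver's usual def/data idiom: read once, update the fields, and write back only when something actually changed. A generic sketch of that shape (illustrative; the real REG_SET_FIELD macro derives mask and shift from generated register headers)::

    static void rmw_register(void __iomem *reg, uint32_t mask, uint32_t shift,
                             uint32_t field_val)
    {
            uint32_t def, data;

            def = data = readl(reg);        /* read once */
            data = (data & ~mask) | ((field_val << shift) & mask);
            if (def != data)                /* skip the write if unchanged */
                    writel(data, reg);
    }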
@@ -611,11 +611,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device

 		dev_info(adev->dev, "RAS controller interrupt triggered "
 					"by NBIF error\n");
-
-		/* ras_controller_int is dedicated for nbif ras error,
-		 * not the global interrupt for sync flood
-		 */
-		amdgpu_ras_reset_gpu(adev);
 	}

 	amdgpu_ras_error_data_fini(&err_data);
@@ -902,6 +902,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
 	.pre_asic_init = &soc15_pre_asic_init,
 	.query_video_codecs = &soc15_query_video_codecs,
 	.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
+	.get_reg_state = &aqua_vanjaram_get_reg_state,
 };

 static int soc15_common_early_init(void *handle)
@@ -1161,6 +1162,11 @@ static int soc15_common_early_init(void *handle)
 			AMD_PG_SUPPORT_VCN_DPG |
 			AMD_PG_SUPPORT_JPEG;
 		adev->external_rev_id = adev->rev_id + 0x46;
+		/* GC 9.4.3 uses MMIO register region hole at a different offset */
+		if (!amdgpu_sriov_vf(adev)) {
+			adev->rmmio_remap.reg_offset = 0x1A000;
+			adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
+		}
 		break;
 	default:
 		/* FIXME: not supported yet */
@@ -27,6 +27,7 @@
 #include "nbio_v6_1.h"
 #include "nbio_v7_0.h"
 #include "nbio_v7_4.h"
+#include "amdgpu_reg_state.h"

 extern const struct amdgpu_ip_block_version vega10_common_ip_block;

@@ -114,6 +115,9 @@ int aldebaran_reg_base_init(struct amdgpu_device *adev);
 void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
 u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
 int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
+ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
+				    enum amdgpu_reg_state reg_state, void *buf,
+				    size_t max_size);

 void vega10_doorbell_index_init(struct amdgpu_device *adev);
 void vega20_doorbell_index_init(struct amdgpu_device *adev);
@@ -2069,7 +2069,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
 };

 static const uint32_t cwsr_trap_gfx10_hex[] = {
-	0xbf820001, 0xbf820220,
+	0xbf820001, 0xbf820221,
 	0xb0804004, 0xb978f802,
 	0x8a78ff78, 0x00020006,
 	0xb97bf803, 0x876eff78,
@@ -2118,391 +2118,391 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
 [391 lines of regenerated trap-handler machine words omitted: this hunk
 rewrites the remainder of the assembled cwsr_trap_gfx10_hex binary to match
 the cwsr_trap_handler_gfx10.asm source change below]
 	0xbf810000, 0xbf9f0000,
 	0xbf9f0000, 0xbf9f0000,
 	0xbf9f0000, 0xbf9f0000,
 	0xbf9f0000, 0x00000000,
 };

 static const uint32_t cwsr_trap_gfx11_hex[] = {
@@ -369,6 +369,12 @@ L_SLEEP:
 	s_or_b32	s_save_pc_hi, s_save_pc_hi, s_save_tmp

 #if NO_SQC_STORE
+#if ASIC_FAMILY <= CHIP_SIENNA_CICHLID
+	// gfx10: If there was a VALU exception, the exception state must be
+	// cleared before executing the VALU instructions below.
+	v_clrexcp
+#endif
+
 	// Trap temporaries must be saved via VGPR but all VGPRs are in use.
 	// There is no ttmp space to hold the resource constant for VGPR save.
 	// Save v0 by itself since it requires only two SGPRs.
@@ -880,6 +880,10 @@ static int copy_signaled_event_data(uint32_t num_events,
 			dst = &data[i].memory_exception_data;
 			src = &event->memory_exception_data;
 			size = sizeof(struct kfd_hsa_memory_exception_data);
+		} else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+			dst = &data[i].memory_exception_data;
+			src = &event->hw_exception_data;
+			size = sizeof(struct kfd_hsa_hw_exception_data);
 		} else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
 			   waiter->event_age_enabled) {
 			dst = &data[i].signal_event_data.last_event_age;
@@ -748,7 +748,6 @@ struct kfd_process_device {
 	/* VM context for GPUVM allocations */
 	struct file *drm_file;
 	void *drm_priv;
-	atomic64_t tlb_seq;

 	/* GPUVM allocations storage */
 	struct idr alloc_idr;
@@ -1128,7 +1127,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
 	struct kfd_dev *dev = adev->kfd.dev;
 	uint32_t i;

-	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
 		return dev->nodes[0];

 	for (i = 0; i < dev->num_nodes; i++)
@@ -1462,7 +1461,14 @@ void kfd_signal_reset_event(struct kfd_node *dev);

 void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

-void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
+				 enum TLB_FLUSH_TYPE type)
+{
+	struct amdgpu_device *adev = pdd->dev->adev;
+	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
+
+	amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
+}

 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
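The inline wrapper hands amdgpu_vm_flush_compute_tlb() a per-device xcc_mask, a bitmask with one bit per accelerated-compute-core instance. An illustrative userspace stand-in for the kernel's for_each_inst() iteration over that mask (the real macro lives in the amdgpu headers)::

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t xcc_mask = 0x0b;       /* hypothetical: XCCs 0, 1 and 3 */

            /* clear the lowest set bit each pass, visiting every instance */
            for (uint32_t m = xcc_mask; m; m &= m - 1)
                    printf("flush TLB on XCC %d\n", __builtin_ctz(m));
            return 0;
    }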
@@ -664,7 +664,8 @@ int kfd_process_create_wq(void)
 	if (!kfd_process_wq)
 		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
 	if (!kfd_restore_wq)
-		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
+							 WQ_FREEZABLE);

 	if (!kfd_process_wq || !kfd_restore_wq) {
 		kfd_process_destroy_wq();
@@ -1642,6 +1643,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 	struct amdgpu_fpriv *drv_priv;
 	struct amdgpu_vm *avm;
 	struct kfd_process *p;
+	struct dma_fence *ef;
 	struct kfd_node *dev;
 	int ret;

@@ -1661,13 +1663,13 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,

 	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
 						     &p->kgd_process_info,
-						     &p->ef);
+						     &ef);
 	if (ret) {
 		pr_err("Failed to create process VM object\n");
 		return ret;
 	}
+	RCU_INIT_POINTER(p->ef, ef);
 	pdd->drm_priv = drm_file->private_data;
-	atomic64_set(&pdd->tlb_seq, 0);

 	ret = kfd_process_device_reserve_ib_mem(pdd);
 	if (ret)
@@ -1909,6 +1911,21 @@ kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
 	return -EINVAL;
 }

+static int signal_eviction_fence(struct kfd_process *p)
+{
+	struct dma_fence *ef;
+	int ret;
+
+	rcu_read_lock();
+	ef = dma_fence_get_rcu_safe(&p->ef);
+	rcu_read_unlock();
+
+	ret = dma_fence_signal(ef);
+	dma_fence_put(ef);
+
+	return ret;
+}
+
 static void evict_process_worker(struct work_struct *work)
 {
 	int ret;
@@ -1921,31 +1938,46 @@ static void evict_process_worker(struct work_struct *work)
 	 * lifetime of this thread, kfd_process p will be valid
 	 */
 	p = container_of(dwork, struct kfd_process, eviction_work);
-	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
-		  "Eviction fence mismatch\n");
-
-	/* Narrow window of overlap between restore and evict work
-	 * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
-	 * unreserves KFD BOs, it is possible to evicted again. But
-	 * restore has few more steps of finish. So lets wait for any
-	 * previous restore work to complete
-	 */
-	flush_delayed_work(&p->restore_work);

 	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
 	if (!ret) {
-		dma_fence_signal(p->ef);
-		dma_fence_put(p->ef);
-		p->ef = NULL;
-		queue_delayed_work(kfd_restore_wq, &p->restore_work,
-				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+		/* If another thread already signaled the eviction fence,
+		 * they are responsible stopping the queues and scheduling
+		 * the restore work.
+		 */
+		if (!signal_eviction_fence(p))
+			queue_delayed_work(kfd_restore_wq, &p->restore_work,
+					   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+		else
+			kfd_process_restore_queues(p);

 		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
 	} else
 		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
 }

+static int restore_process_helper(struct kfd_process *p)
+{
+	int ret = 0;
+
+	/* VMs may not have been acquired yet during debugging. */
+	if (p->kgd_process_info) {
+		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
+			p->kgd_process_info, &p->ef);
+		if (ret)
+			return ret;
+	}
+
+	ret = kfd_process_restore_queues(p);
+	if (!ret)
+		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+	else
+		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+
+	return ret;
+}
+
 static void restore_process_worker(struct work_struct *work)
 {
 	struct delayed_work *dwork;
@@ -1971,24 +2003,15 @@ static void restore_process_worker(struct work_struct *work)
 	 */

 	p->last_restore_timestamp = get_jiffies_64();
-	/* VMs may not have been acquired yet during debugging. */
-	if (p->kgd_process_info)
-		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
-							      &p->ef);

+	ret = restore_process_helper(p);
 	if (ret) {
 		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
 			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
 		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
 				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
 		WARN(!ret, "reschedule restore work failed\n");
-		return;
 	}
-
-	ret = kfd_process_restore_queues(p);
-	if (!ret)
-		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
-	else
-		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
 }

 void kfd_suspend_all_processes(void)
@@ -1999,14 +2022,9 @@ void kfd_suspend_all_processes(void)

 	WARN(debug_evictions, "Evicting all processes");
 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
-		cancel_delayed_work_sync(&p->eviction_work);
-		flush_delayed_work(&p->restore_work);
-
 		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
 			pr_err("Failed to suspend process 0x%x\n", p->pasid);
-		dma_fence_signal(p->ef);
-		dma_fence_put(p->ef);
-		p->ef = NULL;
+		signal_eviction_fence(p);
 	}
 	srcu_read_unlock(&kfd_processes_srcu, idx);
 }
@@ -2018,7 +2036,7 @@ int kfd_resume_all_processes(void)
 	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
-		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+		if (restore_process_helper(p)) {
 			pr_err("Restore process %d failed during resume\n",
 			       p->pasid);
 			ret = -EFAULT;
@@ -2059,36 +2077,6 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
 			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
 }

-void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
-{
-	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
-	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
-	struct kfd_node *dev = pdd->dev;
-	uint32_t xcc_mask = dev->xcc_mask;
-	int xcc = 0;
-
-	/*
-	 * It can be that we race and lose here, but that is extremely unlikely
-	 * and the worst thing which could happen is that we flush the changes
-	 * into the TLB once more which is harmless.
-	 */
-	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
-		return;
-
-	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
-		/* Nothing to flush until a VMID is assigned, which
-		 * only happens when the first queue is created.
-		 */
-		if (pdd->qpd.vmid)
-			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
-							 pdd->qpd.vmid);
-	} else {
-		for_each_inst(xcc, xcc_mask)
-			amdgpu_amdkfd_flush_gpu_tlb_pasid(
-				dev->adev, pdd->process->pasid, type, xcc);
-	}
-}
-
 /* assumes caller holds process lock. */
 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
 {
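The generic shape of the RCU-safe signal-once pattern that signal_eviction_fence() introduces, reduced to its essentials for readability (illustrative; error handling and the fence replacement done on restore are omitted)::

    static int signal_fence_once(struct dma_fence __rcu **fence_ptr)
    {
            struct dma_fence *f;
            int ret;

            rcu_read_lock();
            f = dma_fence_get_rcu_safe(fence_ptr); /* ref stays valid past unlock */
            rcu_read_unlock();

            ret = dma_fence_signal(f);  /* nonzero if NULL or already signaled */
            dma_fence_put(f);
            return ret;
    }

The nonzero return is what lets the evict worker detect that another thread won the race and handle queue restoration itself instead of scheduling restore work twice.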
@@ -169,16 +169,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 	return 0;
 }

+static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
+				     struct process_queue_node *pqn)
+{
+	struct kfd_node *dev;
+	struct kfd_process_device *pdd;
+
+	dev = pqn->q->device;
+
+	pdd = kfd_get_process_device_data(dev, pqm->process);
+	if (!pdd) {
+		pr_err("Process device data doesn't exist\n");
+		return;
+	}
+
+	if (pqn->q->gws) {
+		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+		    !dev->kfd->shared_resources.enable_mes)
+			amdgpu_amdkfd_remove_gws_from_process(
+				pqm->process->kgd_process_info, pqn->q->gws);
+		pdd->qpd.num_gws = 0;
+	}
+
+	if (dev->kfd->shared_resources.enable_mes) {
+		amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
+		if (pqn->q->wptr_bo)
+			amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+	}
+}
+
 void pqm_uninit(struct process_queue_manager *pqm)
 {
 	struct process_queue_node *pqn, *next;

 	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
-		if (pqn->q && pqn->q->gws &&
-		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
-		    !pqn->q->device->kfd->shared_resources.enable_mes)
-			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
-							      pqn->q->gws)
+		if (pqn->q)
+			pqm_clean_queue_resource(pqm, pqn);
+
 		kfd_procfs_del_queue(pqn->q);
 		uninit_queue(pqn->q);
 		list_del(&pqn->process_queue_list);
@@ -461,22 +488,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 			goto err_destroy_queue;
 		}

-		if (pqn->q->gws) {
-			if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
-			    !dev->kfd->shared_resources.enable_mes)
-				amdgpu_amdkfd_remove_gws_from_process(
-					pqm->process->kgd_process_info,
-					pqn->q->gws);
-			pdd->qpd.num_gws = 0;
-		}
-
-		if (dev->kfd->shared_resources.enable_mes) {
-			amdgpu_amdkfd_free_gtt_mem(dev->adev,
-						   pqn->q->gang_ctx_bo);
-			if (pqn->q->wptr_bo)
-				amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
-
-		}
+		pqm_clean_queue_resource(pqm, pqn);
 		uninit_queue(pqn->q);
 	}

@@ -1870,7 +1870,7 @@ out_reschedule:
	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		schedule_delayed_work(&svms->restore_work,
		queue_delayed_work(system_freezable_wq, &svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);

@@ -1946,7 +1946,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
			pr_debug("failed to quiesce KFD\n");

		pr_debug("schedule to restore svm %p ranges\n", svms);
		schedule_delayed_work(&svms->restore_work,
		queue_delayed_work(system_freezable_wq, &svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		unsigned long s, l;
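Both hunks above move the SVM restore work from schedule_delayed_work() (system_wq) to queue_delayed_work() on system_freezable_wq, so the handler cannot race with suspend: freezable workqueues are drained and held while tasks are frozen. A hedged sketch of the pattern (kernel C; restore_fn and the delay are placeholders):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void restore_fn(struct work_struct *w) { /* ... */ }
	static DECLARE_DELAYED_WORK(restore_work, restore_fn);

	static void reschedule_restore(unsigned int delay_ms)
	{
		/* system_freezable_wq drains before tasks are frozen, so
		 * restore_fn() never runs concurrently with suspend. */
		queue_delayed_work(system_freezable_wq, &restore_work,
				   msecs_to_jiffies(delay_ms));
	}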
@@ -30,6 +30,9 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
@@ -896,8 +896,7 @@ static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;

@@ -991,8 +990,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev,
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;

@@ -1719,23 +1717,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;

	/* TODO: There is a new drm mst change where the freedom of
	 * vc_next_start_slot update is revoked/moved into drm, instead of in
	 * driver. This forces us to make sure to get vc_next_start_slot updated
	 * in drm function each time without considering if mst_state is active
	 * or not. Otherwise, next time hotplug will give wrong start_slot
	 * number. We are implementing a temporary solution to even notify drm
	 * mst deallocation when link is no longer of MST type when uncommitting
	 * the stream so we will have more time to work on a proper solution.
	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
	 * should notify drm to do a complete "reset" of its states and stop
	 * calling further drm mst functions when link is no longer of an MST
	 * type. This could happen when we unplug an MST hubs/displays. When
	 * uncommit stream comes later after unplug, we should just reset
	 * hardware states only.
	 */
	adev->dm.dc->debug.temp_mst_deallocation_sequence = true;

	if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
		DRM_INFO("DP-HDMI FRL PCON supported\n");

@@ -6269,7 +6250,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
		ret = 0;
	}

@@ -6314,7 +6295,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
			dm_state->abm_level : 0;
		ret = 0;
	}

@@ -6387,7 +6369,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
		state->pbn = 0;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;
			state->abm_level = amdgpu_dm_abm_level ?:
				ABM_LEVEL_IMMEDIATE_DISABLE;

		__drm_atomic_helper_connector_reset(connector, &state->base);
}
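The three ABM hunks above map the user-visible level 0 onto the internal sentinel ABM_LEVEL_IMMEDIATE_DISABLE using the GNU `?:` extension (`a ?: b` means `a ? a : b`), and mask the sentinel back to 0 on the get path. A small self-contained illustration (the sentinel value here is arbitrary, not the driver's):

	#define ABM_DISABLE_SENTINEL 255U /* stand-in for ABM_LEVEL_IMMEDIATE_DISABLE */

	static unsigned int abm_store(unsigned int val)
	{
		return val ?: ABM_DISABLE_SENTINEL;                  /* 0 -> sentinel */
	}

	static unsigned int abm_show(unsigned int stored)
	{
		return stored != ABM_DISABLE_SENTINEL ? stored : 0;  /* sentinel -> 0 */
	}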
@@ -10790,8 +10773,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
@@ -2971,6 +2971,104 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
	return 0;
}

static int dmub_trace_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;
	struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
	enum dmub_gpint_command cmd;
	enum dmub_status status;
	u64 mask = 0xffff;
	u8 shift = 0;
	u32 res;
	int i;

	if (!srv->fw_version)
		return -EINVAL;

	for (i = 0; i < 4; i++) {
		res = (val & mask) >> shift;

		switch (i) {
		case 0:
			cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0;
			break;
		case 1:
			cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1;
			break;
		case 2:
			cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2;
			break;
		case 3:
			cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3;
			break;
		}

		status = dmub_srv_send_gpint_command(srv, cmd, res, 30);

		if (status == DMUB_STATUS_TIMEOUT)
			return -ETIMEDOUT;
		else if (status == DMUB_STATUS_INVALID)
			return -EINVAL;
		else if (status != DMUB_STATUS_OK)
			return -EIO;

		usleep_range(100, 1000);

		mask <<= 16;
		shift += 16;
	}

	return 0;
}

static int dmub_trace_mask_show(void *data, u64 *val)
{
	enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
	struct amdgpu_device *adev = data;
	struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
	enum dmub_status status;
	u8 shift = 0;
	u64 raw = 0;
	u64 res = 0;
	int i = 0;

	if (!srv->fw_version)
		return -EINVAL;

	while (i < 4) {
		status = dmub_srv_send_gpint_command(srv, cmd, 0, 30);

		if (status == DMUB_STATUS_OK) {
			status = dmub_srv_get_gpint_response(srv, (u32 *) &raw);

			if (status == DMUB_STATUS_INVALID)
				return -EINVAL;
			else if (status != DMUB_STATUS_OK)
				return -EIO;
		} else if (status == DMUB_STATUS_TIMEOUT) {
			return -ETIMEDOUT;
		} else if (status == DMUB_STATUS_INVALID) {
			return -EINVAL;
		} else {
			return -EIO;
		}

		usleep_range(100, 1000);

		cmd++;
		res |= (raw << shift);
		shift += 16;
		i++;
	}

	*val = res;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show,
			 dmub_trace_mask_set, "0x%llx\n");

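dmub_trace_mask_set() above shuttles the 64-bit debugfs value to firmware 16 bits at a time, one GPINT word per iteration; the show path reassembles the words in the same LSW-first order. The word split and join reduce to this (standalone sketch, not driver code):

	#include <stdint.h>

	/* Split a 64-bit mask into four 16-bit GPINT payloads, LSW first. */
	static void split_mask(uint64_t val, uint16_t words[4])
	{
		for (int i = 0; i < 4; i++)
			words[i] = (uint16_t)(val >> (16 * i));
	}

	/* Inverse, as used by the show path: or the words back, LSW first. */
	static uint64_t join_mask(const uint16_t words[4])
	{
		uint64_t val = 0;
		for (int i = 0; i < 4; i++)
			val |= (uint64_t)words[i] << (16 * i);
		return val;
	}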
/*
 * Set dmcub trace event IRQ enable or disable.
 * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en

@@ -3647,12 +3745,16 @@ static int capabilities_show(struct seq_file *m, void *unused)
	bool mall_supported = dc->caps.mall_size_total;
	bool subvp_supported = dc->caps.subvp_fw_processing_delay_us;
	unsigned int mall_in_use = false;
	unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);
	unsigned int subvp_in_use = false;

	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (hubbub->funcs->get_mall_en)
		hubbub->funcs->get_mall_en(hubbub, &mall_in_use);

	if (dc->cap_funcs.get_subvp_en)
		subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);

	seq_printf(m, "mall supported: %s, enabled: %s\n",
		   mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no");
	seq_printf(m, "sub-viewport supported: %s, enabled: %s\n",

@@ -3880,6 +3982,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
	debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
				   adev, &force_timing_sync_ops);

	debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root,
				   adev, &dmub_trace_mask_fops);

	debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
				   adev, &dmcub_trace_event_state_fops);

@@ -334,15 +334,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
	return ACT_SUCCESS;
}

bool dm_helpers_dp_mst_send_payload_allocation(
void dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
	struct drm_dp_mst_atomic_payload *new_payload;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	int ret = 0;

@@ -350,25 +349,13 @@ bool dm_helpers_dp_mst_send_payload_allocation(
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return false;
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);

	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	if (!enable) {
		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
	}

	if (enable) {
		ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
	} else {
		dm_helpers_construct_old_payload(mst_mgr, mst_state,
						 new_payload, &old_payload);
		drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
	}
	ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);

	if (ret) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,

@@ -379,10 +366,36 @@ bool dm_helpers_dp_mst_send_payload_allocation(
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 clr_flag, false);
	}

	return true;
}

void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
	dm_helpers_construct_old_payload(mst_mgr, mst_state,
					 new_payload, &old_payload);

	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);

	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
}

void dm_dtn_log_begin(struct dc_context *ctx,
		struct dc_log_buffer_ctx *log_ctx)
{
@@ -45,7 +45,7 @@
#include "amdgpu_dm_debugfs.h"
#endif

#include "dc/dcn20/dcn20_resource.h"
#include "dc/resource/dcn20/dcn20_resource.h"

#define PEAK_FACTOR_X1000 1006

@@ -425,8 +425,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector,
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;

@@ -1603,9 +1602,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
	struct dc_link_settings cur_link_settings;
	unsigned int end_to_end_bw_in_kbps = 0;
	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
	unsigned int max_compressed_bw_in_kbps = 0;
	struct dc_dsc_bw_range bw_range = {0};
	uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
	struct dc_dsc_config_options dsc_options = {0};

	/*
	 * Consider the case with the depth of the mst topology tree is equal or less than 2

@@ -1621,30 +1619,39 @@ enum dc_status dm_dp_mst_is_port_support_mode(
	    (aconnector->mst_output_port->passthrough_aux ||
	     aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
		cur_link_settings = stream->link->verified_link_cap;
		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
		down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);

		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							       &cur_link_settings);
		down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
		/* pick the end to end bw bottleneck */
		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps);

		/* pick the bottleneck */
		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
					    down_link_bw_in_kbps);

		/*
		 * use the maximum dsc compression bandwidth as the required
		 * bandwidth for the mode
		 */
		max_compressed_bw_in_kbps = bw_range.min_kbps;

		if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
			DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
		if (end_to_end_bw_in_kbps < bw_range.min_kbps) {
			DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n");
			return DC_FAIL_BANDWIDTH_VALIDATE;
		}

		if (end_to_end_bw_in_kbps < bw_range.stream_kbps) {
			dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
			dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
			if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
					&stream->sink->dsc_caps.dsc_dec_caps,
					&dsc_options,
					end_to_end_bw_in_kbps,
					&stream->timing,
					dc_link_get_highest_encoding_format(stream->link),
					&stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n");
			} else {
				DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n");
				return DC_FAIL_BANDWIDTH_VALIDATE;
			}
		}
	} else {
		/* check if mode could be supported within full_pbn */
		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
		if (pbn > full_pbn)
		if (pbn > aconnector->mst_output_port->full_pbn)
			return DC_FAIL_BANDWIDTH_VALIDATE;
	}

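For the non-DSC branch above: bpp is bits per pixel (bits per component x 3 for RGB), and drm_dp_calc_pbn_mode() takes the pixel clock in kHz and bpp in 1/16-bit units, hence the `bpp << 4`; for 8 bpc that is 24 bpp, passed as 384. A hedged sketch of the same check (mode_fits_port is a hypothetical helper; the real PBN math lives in the DRM core):

	/* Returns nonzero if the mode's PBN demand fits the MST port budget. */
	static int mode_fits_port(unsigned int pix_clk_100hz, unsigned int bpc,
				  unsigned int full_pbn)
	{
		unsigned int bpp = bpc * 3;       /* RGB, no DSC      */
		unsigned int bpp_x16 = bpp << 4;  /* 1/16-bpp units   */
		unsigned int pbn = drm_dp_calc_pbn_mode(pix_clk_100hz / 10, bpp_x16);

		return pbn <= full_pbn;
	}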
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.

DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc
DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc

ifdef CONFIG_DRM_AMD_DC_FP

@@ -38,8 +38,6 @@ DC_LIBS += dcn302
DC_LIBS += dcn303
DC_LIBS += dcn31
DC_LIBS += dcn314
DC_LIBS += dcn315
DC_LIBS += dcn316
DC_LIBS += dcn32
DC_LIBS += dcn321
DC_LIBS += dcn35

@@ -51,7 +49,6 @@ DC_LIBS += dce120

DC_LIBS += dce112
DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80

ifdef CONFIG_DRM_AMD_DC_SI
@@ -334,7 +334,7 @@ static struct wm_table lpddr5_wm_table = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.pstate_latency_us = 129.0,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,

@@ -342,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.pstate_latency_us = 129.0,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,

@@ -350,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.pstate_latency_us = 129.0,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,

@@ -358,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.pstate_latency_us = 129.0,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
@@ -232,6 +232,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
	if (dc->work_arounds.skip_clock_update)
		return;

	/* DTBCLK is fixed, so set a default if unspecified. */
	if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
		new_clocks->ref_dtbclk_khz = 600000;

	/*
	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
	 * also if safe to lower is false, we just go in the higher state

@@ -265,8 +269,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,

	if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
		dcn35_smu_set_dtbclk(clk_mgr, true);
		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
		clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;

		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
		clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
	}

	/* check that we're not already in D0 */

@@ -314,17 +320,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
		update_dispclk = true;
	}

	if (!new_clocks->dtbclk_en) {
		new_clocks->ref_dtbclk_khz = 600000;
	}

	/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
	if (!dc->debug.disable_dtb_ref_clk_switch &&
	    should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
		/* DCCG requires KHz precision for DTBCLK */
		dcn35_smu_set_dtbclk(clk_mgr, true);

		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
	    should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
			     clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
		clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
	}

	if (dpp_clock_lowered) {

@@ -443,32 +444,32 @@ static struct wm_table ddr5_wm_table = {
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
	}

@@ -480,32 +481,32 @@ static struct wm_table lpddr5_wm_table = {
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 14.0,
			.sr_enter_plus_exit_time_us = 16.0,
			.valid = true,
		},
	}

@@ -515,11 +516,6 @@ static DpmClocks_t_dcn35 dummy_clocks;

static struct dcn35_watermarks dummy_wms = { 0 };

static struct dcn35_ss_info_table ss_info_table = {
	.ss_divider = 1000,
	.ss_percentage = {0, 0, 375, 375, 375}
};

static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
	int i, num_valid_sets;

@@ -653,27 +649,47 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio)
	return 1;
}

static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
{
	return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
}

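calc_dram_speed_mts() above folds the WCK ratio and double data rate into one effective transfer rate: UClk x wck_ratio x 2. Assuming convert_wck_ratio() yields the integer ratio, a 1000 MHz UClk entry with a 2:1 WCK ratio gives 1000 x 2 x 2 = 4000 MT/s; the populate function below then selects min/max p-states by this number rather than by raw UClk. Equivalent arithmetic as a standalone sketch:

	#include <stdint.h>

	/* Effective DRAM data rate in MT/s: uclk_mhz * wck_ratio * 2 (DDR). */
	static inline uint32_t dram_speed_mts(uint32_t uclk_mhz, uint32_t wck_ratio)
	{
		return uclk_mhz * wck_ratio * 2;
	}
	/* dram_speed_mts(1000, 2) == 4000 */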
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
						    struct integrated_info *bios_info,
						    DpmClocks_t_dcn35 *clock_table)
{
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
	uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0;
	uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
	uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
	uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
	int i;

	/* Determine min/max p-state values. */
	for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
		if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) &&
		    clock_table->MemPstateTable[i].UClk > max_uclk) {
			max_uclk = clock_table->MemPstateTable[i].UClk;
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
			max_dram_speed_mts = dram_speed_mts;
			max_pstate = i;
		}
	}

	/* We expect the table to contain at least one valid Uclk entry. */
	ASSERT(is_valid_clock_value(max_uclk));
	min_dram_speed_mts = max_dram_speed_mts;
	min_pstate = max_pstate;

	for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
			min_dram_speed_mts = dram_speed_mts;
			min_pstate = i;
		}
	}

	/* We expect the table to contain at least one valid P-state entry. */
	ASSERT(clock_table->NumMemPstatesEnabled &&
	       is_valid_clock_value(max_dram_speed_mts) &&
	       is_valid_clock_value(min_dram_speed_mts));

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&

@@ -683,47 +699,46 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
		max_dppclk = find_max_clk_value(clock_table->DppClocks,
						clock_table->NumDispClkLevelsEnabled);
	} else {
		/* Invalid number of entries in the table from PMFW. */
		ASSERT(0);
	}
	if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS)
		max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq,
					      clock_table->NumFclkLevelsEnabled);

	for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
		uint32_t min_uclk = clock_table->MemPstateTable[0].UClk;
	/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
	ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);

	max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);

	for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
		int j;

		for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) {
			if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) &&
			    clock_table->MemPstateTable[j].UClk < min_uclk &&
			    clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
				min_uclk = clock_table->MemPstateTable[j].UClk;
				min_pstate = j;
			}
		}

		/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
		for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
			if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
				break;
				break;

		bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
		bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
		bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
		bw_params->clk_table.entries[i].fclk_mhz = max_fclk;

		/* Now update clocks we do read */
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
			clock_table->MemPstateTable[min_pstate].WckRatio);
	}
		bw_params->clk_table.entries[i].wck_ratio =
			convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);

		/* Dcfclk and Fclk are tied, but at a different ratio */
		bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
	}

	/* Make sure to include at least one entry at highest pstate */
	if (max_pstate != min_pstate || i == 0) {
		if (i > MAX_NUM_DPM_LVL - 1)
			i = MAX_NUM_DPM_LVL - 1;

		bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;

@@ -739,6 +754,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
	}
	bw_params->clk_table.num_entries = i--;

	/* Make sure all highest clocks are included*/
	bw_params->clk_table.entries[i].socclk_mhz =
		find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dispclk_mhz =

@@ -757,6 +773,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
	bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
	bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;

	/*
	 * Set any 0 clocks to max default setting. Not an issue for
	 * power since we aren't doing switching in such case anyway
	 */
	for (i = 0; i < bw_params->clk_table.num_entries; i++) {
		if (!bw_params->clk_table.entries[i].fclk_mhz) {
			bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;

@@ -965,21 +986,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};

static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
	uint32_t clock_source;
	struct dc_context *ctx = clk_mgr->base.ctx;

	REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);

	clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];

	if (clk_mgr->dprefclk_ss_percentage != 0) {
		clk_mgr->ss_on_dprefclk = true;
		clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
	}
}

void dcn35_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn35 *clk_mgr,

@@ -1043,17 +1049,11 @@ void dcn35_clk_mgr_construct(
	dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);

	clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
	clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base);
	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;

	if (!clk_mgr->base.base.clks.ref_dtbclk_khz)
		dcn35_smu_set_dtbclk(&clk_mgr->base, true);

	clk_mgr->base.base.clks.dtbclk_en = true;
	dce_clock_read_ss_info(&clk_mgr->base);
	/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/

	dcn35_read_ss_info_from_lut(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn35_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {

@@ -1129,7 +1129,6 @@ void dcn35_clk_mgr_construct(
		ctx->dc->debug.disable_dpp_power_gate = false;
		ctx->dc->debug.disable_hubp_power_gate = false;
		ctx->dc->debug.disable_dsc_power_gate = false;
		ctx->dc->debug.disable_hpo_power_gate = false;
	} else {
		/*let's reset the config control flag*/
		ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
@@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
			msleep(delay_us/1000);
		else if (delay_us > 0)
			udelay(delay_us);

		if (clk_mgr->base.ctx->dc->debug.disable_timeout)
			max_retries++;
	} while (max_retries--);

	return res_val;
@@ -1964,6 +1964,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
		/* Need to do otg sync again as otg could be out of sync due to otg
		 * workaround applied during clock update
		 */
		dc_trigger_sync(dc, context);
	}

	if (dc->hwss.update_dsc_pg)
@@ -69,8 +69,8 @@
#include "dcn314/dcn314_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
#include "../dcn32/dcn32_resource.h"
#include "../dcn321/dcn321_resource.h"
#include "dcn32/dcn32_resource.h"
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"

#define VISUAL_CONFIRM_BASE_DEFAULT 3

@@ -1764,6 +1764,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx(
	return free_pipe_idx;
}

int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
		const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,
		const struct resource_pool *pool)
{
	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
	const struct pipe_ctx *new_pipe, *cur_pipe;
	int i;

	for (i = 0; i < pool->pipe_count; i++) {
		cur_pipe = &cur_res_ctx->pipe_ctx[i];
		new_pipe = &new_res_ctx->pipe_ctx[i];

		if (resource_is_pipe_type(cur_pipe, OTG_MASTER) &&
		    resource_is_pipe_type(new_pipe, FREE_PIPE)) {
			free_pipe_idx = i;
			break;
		}
	}

	return free_pipe_idx;
}

int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
		const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,

@@ -2233,7 +2256,7 @@ static struct pipe_ctx *get_last_dpp_pipe_in_mpcc_combine(
}

static bool update_pipe_params_after_odm_slice_count_change(
		const struct dc_stream_state *stream,
		struct pipe_ctx *otg_master,
		struct dc_state *context,
		const struct resource_pool *pool)
{

@@ -2243,9 +2266,12 @@ static bool update_pipe_params_after_odm_slice_count_change(

	for (i = 0; i < pool->pipe_count && result; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && pipe->plane_state)
		if (pipe->stream == otg_master->stream && pipe->plane_state)
			result = resource_build_scaling_params(pipe);
	}

	if (pool->funcs->build_pipe_pix_clk_params)
		pool->funcs->build_pipe_pix_clk_params(otg_master);
	return result;
}

@@ -2928,7 +2954,7 @@ bool resource_update_pipes_for_stream_with_slice_count(
			otg_master, new_ctx, pool);
	if (result)
		result = update_pipe_params_after_odm_slice_count_change(
				otg_master->stream, new_ctx, pool);
				otg_master, new_ctx, pool);
	return result;
}

@@ -3586,6 +3612,7 @@ static void mark_seamless_boot_stream(
 * |________|_______________|___________|_____________|
 */
static bool acquire_otg_master_pipe_for_stream(
		const struct dc_state *cur_ctx,
		struct dc_state *new_ctx,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)

@@ -3599,7 +3626,22 @@ static bool acquire_otg_master_pipe_for_stream(
	int pipe_idx;
	struct pipe_ctx *pipe_ctx = NULL;

	pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
	/*
	 * Upper level code is responsible to optimize unnecessary addition and
	 * removal for unchanged streams. So unchanged stream will keep the same
	 * OTG master instance allocated. When current stream is removed and a
	 * new stream is added, we want to reuse the OTG instance made available
	 * by the removed stream first. If not found, we try to avoid of using
	 * any free pipes already used in current context as this could tear
	 * down exiting ODM/MPC/MPO configuration unnecessarily.
	 */
	pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
	if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
				&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
	if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
	if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
		pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
		memset(pipe_ctx, 0, sizeof(*pipe_ctx));
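The rewritten acquisition above is a three-tier fallback: first reuse a pipe that was an OTG master in the current context, then any free pipe not used in the current context, and only then any free pipe at all, minimizing disturbance to existing ODM/MPC/MPO configurations. The same prioritized-finder shape, generalized (standalone sketch; all names hypothetical):

	/* Generic prioritized allocation: try finders in order, stop at first hit. */
	typedef int (*pipe_finder_t)(const void *cur, void *next, const void *pool);

	static int acquire_with_priority(pipe_finder_t finders[], int n,
					 const void *cur, void *next, const void *pool)
	{
		for (int i = 0; i < n; i++) {
			int idx = finders[i](cur, next, pool);
			if (idx != -1)	/* -1 stands in for FREE_PIPE_INDEX_NOT_FOUND */
				return idx;
		}
		return -1;
	}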
@@ -3659,7 +3701,7 @@ enum dc_status resource_map_pool_resources(

	if (!acquired)
		/* acquire new resources */
		acquired = acquire_otg_master_pipe_for_stream(
		acquired = acquire_otg_master_pipe_for_stream(dc->current_state,
				context, pool, stream);

	pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);

@@ -4512,7 +4554,7 @@ void dc_resource_state_copy_construct(
	struct dml2_context *dml2 = NULL;

	// Need to preserve allocated dml2 context
	if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
	if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
		dml2 = dst_ctx->bw_ctx.dml2;
#endif

@@ -4520,7 +4562,7 @@ void dc_resource_state_copy_construct(

#ifdef CONFIG_DRM_AMD_DC_FP
	// Preserve allocated dml2 context
	if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
	if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
		dst_ctx->bw_ctx.dml2 = dml2;
#endif

@@ -5274,7 +5316,7 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
	if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
	    ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
		return true;
	else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 &&
	else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
		 ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
		return true;

@@ -49,7 +49,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.259"
#define DC_VER "3.2.262"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@@ -874,6 +874,7 @@ struct dc_debug_options {
	unsigned int seamless_boot_odm_combine;
	unsigned int force_odm_combine_4to1; //bit vector based on otg inst
	int minimum_z8_residency_time;
	int minimum_z10_residency_time;
	bool disable_z9_mpc;
	unsigned int force_fclk_khz;
	bool enable_tri_buf;

@@ -955,7 +956,6 @@ struct dc_debug_options {
	unsigned int min_prefetch_in_strobe_ns;
	bool disable_unbounded_requesting;
	bool dig_fifo_off_in_blank;
	bool temp_mst_deallocation_sequence;
	bool override_dispclk_programming;
	bool otg_crc_db;
	bool disallow_dispclk_dppclk_ds;

@@ -978,6 +978,7 @@ struct dc_debug_options {
	bool psp_disabled_wa;
	unsigned int ips2_eval_delay_us;
	unsigned int ips2_entry_delay_us;
	bool disable_timeout;
};

struct gpu_info_soc_bounding_box_v1_0;

@@ -1608,7 +1609,6 @@ struct dc_link {
	enum edp_revision edp_revision;
	union dpcd_sink_ext_caps dpcd_sink_ext_caps;

	struct backlight_settings backlight_settings;
	struct psr_settings psr_settings;

	struct replay_settings replay_settings;
@@ -241,7 +241,12 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		status = dmub_srv_wait_for_idle(dmub, 100000);
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (status != DMUB_STATUS_OK);
		} else
			status = dmub_srv_wait_for_idle(dmub, 100000);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);

@@ -1147,10 +1152,16 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
		return true;

	if (wait) {
		status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
		if (status != DMUB_STATUS_OK) {
			DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
			return false;
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

@@ -1187,7 +1198,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
	const uint32_t max_num_polls = 10000;
	uint32_t allow_state = 0;
	uint32_t commit_state = 0;
	uint32_t i;
	int i;

	if (dc->debug.dmcub_emulation)
		return;

@@ -1220,6 +1231,9 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
				break;

			udelay(1);

			if (dc->debug.disable_timeout)
				i--;
		}
		ASSERT(i < max_num_polls);

@@ -1242,6 +1256,9 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
				break;

			udelay(1);

			if (dc->debug.disable_timeout)
				i--;
		}
		ASSERT(i < max_num_polls);
	}
@@ -991,10 +991,6 @@ struct link_mst_stream_allocation_table {
	struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};

struct backlight_settings {
	uint32_t backlight_millinits;
};

/* PSR feature flags */
struct psr_settings {
	bool psr_feature_enabled; // PSR is supported by sink

@@ -1022,6 +1018,24 @@ enum replay_coasting_vtotal_type {
	PR_COASTING_TYPE_NUM,
};

enum replay_link_off_frame_count_level {
	PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0,
	PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2,
	PR_LINK_OFF_FRAME_COUNT_BEST = 0x6,
};

/*
 * This is the general interface for Replay to set a 32-bit
 * variable in DMUB. The message type indicates which variable
 * is passed to DMUB.
 */
enum replay_FW_Message_type {
	Replay_Msg_Not_Support = -1,
	Replay_Set_Timing_Sync_Supported,
	Replay_Set_Residency_Frameupdate_Timer,
};

union replay_error_status {
	struct {
		unsigned char STATE_TRANSITION_ERROR :1;

@@ -1033,26 +1047,48 @@ union replay_error_status {
};

struct replay_config {
	bool replay_supported; // Replay feature is supported
	unsigned int replay_power_opt_supported; // Power opt flags that are supported
	bool replay_smu_opt_supported; // SMU optimization is supported
	unsigned int replay_enable_option; // Replay enablement option
	uint32_t debug_flags; // Replay debug flags
	bool replay_timing_sync_supported; // Replay desync is supported
	bool force_disable_desync_error_check; // Replay desync is supported
	bool received_desync_error_hpd; //Replay Received Desync Error HPD.
	union replay_error_status replay_error_status; // Replay error status
	/* Replay feature is supported */
	bool replay_supported;
	/* Power opt flags that are supported */
	unsigned int replay_power_opt_supported;
	/* SMU optimization is supported */
	bool replay_smu_opt_supported;
	/* Replay enablement option */
	unsigned int replay_enable_option;
	/* Replay debug flags */
	uint32_t debug_flags;
	/* Replay timing sync is supported */
	bool replay_timing_sync_supported;
	/* Replay: disable desync error check. */
	bool force_disable_desync_error_check;
	/* Replay received desync error HPD. */
	bool received_desync_error_hpd;
	/* Replay supports fast resync in ultra sleep mode (long vblank) */
	bool replay_support_fast_resync_in_ultra_sleep_mode;
	/* Replay error status */
	union replay_error_status replay_error_status;
};

/* Replay feature flags */
/* Replay feature flags*/
struct replay_settings {
	struct replay_config config; // Replay configuration
	bool replay_feature_enabled; // Replay feature is ready for activating
	bool replay_allow_active; // Replay is currently active
	unsigned int replay_power_opt_active; // Power opt flags that are activated currently
	bool replay_smu_opt_enable; // SMU optimization is enabled
	uint16_t coasting_vtotal; // Current Coasting vtotal
	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
	/* Replay configuration */
	struct replay_config config;
	/* Replay feature is ready for activating */
	bool replay_feature_enabled;
	/* Replay is currently active */
	bool replay_allow_active;
	/* Replay may run with a long vblank */
	bool replay_allow_long_vblank;
	/* Power opt flags that are activated currently */
	unsigned int replay_power_opt_active;
	/* SMU optimization is enabled */
	bool replay_smu_opt_enable;
	/* Current coasting vtotal */
	uint16_t coasting_vtotal;
	/* Coasting vtotal table */
	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
	/* Maximum link off frame count */
	enum replay_link_off_frame_count_level link_off_frame_count_level;
};

/* To split out "global" and "per-panel" config settings.
@@ -45,6 +45,8 @@ struct dmub_replay_funcs {
		struct replay_context *replay_context, uint8_t panel_inst);
	void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
		uint8_t panel_inst);
	void (*replay_send_cmd)(struct dmub_replay *dmub,
		enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
	void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
		uint8_t panel_inst);
	void (*replay_residency)(struct dmub_replay *dmub,
@@ -1,46 +0,0 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.

CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)

DCE100 = dce100_resource.o

AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))

AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)


###############################################################################
# DCE 10x
###############################################################################
ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
TG_DCE100 = dce100_resource.o

AMD_DAL_TG_DCE100 = $(addprefix \
	$(AMDDALPATH)/dc/dce100/,$(TG_DCE100))

AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
endif
@ -26,8 +26,8 @@
|
||||
CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
|
||||
|
||||
DCE110 = dce110_timing_generator.o \
|
||||
dce110_compressor.o dce110_resource.o \
|
||||
dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
|
||||
dce110_compressor.o dce110_opp_regamma_v.o \
|
||||
dce110_opp_csc_v.o dce110_timing_generator_v.o \
|
||||
dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
|
||||
|
||||
AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
|
||||
|
@ -25,8 +25,7 @@
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
|
||||
|
||||
DCE112 = dce112_compressor.o \
|
||||
dce112_resource.o
|
||||
DCE112 = dce112_compressor.o
|
||||
|
||||
AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
|
||||
|
||||
|
@ -26,7 +26,7 @@
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
|
||||
|
||||
DCE120 = dce120_resource.o dce120_timing_generator.o \
|
||||
DCE120 = dce120_timing_generator.o
|
||||
|
||||
AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
|
||||
|
||||
|
@ -25,8 +25,7 @@
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
|
||||
|
||||
DCE80 = dce80_timing_generator.o \
|
||||
dce80_resource.o
|
||||
DCE80 = dce80_timing_generator.o
|
||||
|
||||
AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
|
||||
|
||||
|
@ -22,9 +22,9 @@
|
||||
#
|
||||
# Makefile for DCN.
|
||||
|
||||
DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \
|
||||
DCN10 = dcn10_init.o dcn10_ipp.o \
|
||||
dcn10_hw_sequencer_debug.o \
|
||||
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
|
||||
dcn10_dpp.o dcn10_opp.o \
|
||||
dcn10_hubp.o dcn10_mpc.o \
|
||||
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
|
||||
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
|
||||
|
@ -32,7 +32,7 @@
|
||||
#include "dce/dce_hwseq.h"
|
||||
#include "abm.h"
|
||||
#include "dmcu.h"
|
||||
#include "dcn10_optc.h"
|
||||
#include "dcn10/dcn10_optc.h"
|
||||
#include "dcn10/dcn10_dpp.h"
|
||||
#include "dcn10/dcn10_mpc.h"
|
||||
#include "timing_generator.h"
|
||||
|
@ -2,13 +2,11 @@
|
||||
#
|
||||
# Makefile for DCN.
|
||||
|
||||
DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
|
||||
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
|
||||
DCN20 = dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
|
||||
dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
|
||||
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
|
||||
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
|
||||
|
||||
DCN20 += dcn20_dsc.o
|
||||
|
||||
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
|
||||
|
||||
AMD_DISPLAY_FILES += $(AMD_DAL_DCN20)
|
||||
|
@ -291,7 +291,11 @@
|
||||
type SYMCLKB_FE_SRC_SEL;\
|
||||
type SYMCLKC_FE_SRC_SEL;\
|
||||
type SYMCLKD_FE_SRC_SEL;\
|
||||
type SYMCLKE_FE_SRC_SEL;
|
||||
type SYMCLKE_FE_SRC_SEL;\
|
||||
type DTBCLK_P0_GATE_DISABLE;\
|
||||
type DTBCLK_P1_GATE_DISABLE;\
|
||||
type DTBCLK_P2_GATE_DISABLE;\
|
||||
type DTBCLK_P3_GATE_DISABLE;\
|
||||
|
||||
struct dccg_shift {
|
||||
DCCG_REG_FIELD_LIST(uint8_t)
|
||||
|
@ -1,9 +1,9 @@
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# Makefile for DCN.
|
||||
DCN201 = dcn201_init.o dcn201_resource.o \
|
||||
DCN201 = dcn201_init.o \
|
||||
dcn201_hubbub.o\
|
||||
dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
|
||||
dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
|
||||
dcn201_dccg.o dcn201_link_encoder.o
|
||||
|
||||
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# Makefile for DCN21.
|
||||
|
||||
DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
|
||||
DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o \
|
||||
dcn21_link_encoder.o dcn21_dccg.o
|
||||
|
||||
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
|
||||
|
@@ -28,7 +28,6 @@ DCN30 := \
 	dcn30_hubbub.o \
 	dcn30_hubp.o \
 	dcn30_dpp.o \
-	dcn30_optc.o \
 	dcn30_dccg.o \
 	dcn30_mpc.o dcn30_vpg.o \
 	dcn30_afmt.o \
@@ -38,7 +37,6 @@ DCN30 := \
 	dcn30_dwb_cm.o \
 	dcn30_cm_common.o \
 	dcn30_mmhubbub.o \
-	dcn30_resource.o \
 	dcn30_dio_link_encoder.o
 
@@ -10,9 +10,8 @@
 #
 # Makefile for dcn30.
 
-DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-	dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \
-	dcn301_optc.o
+DCN301 = dcn301_init.o dcn301_dccg.o \
+	dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
@@ -5,7 +5,7 @@
 #
 # Makefile for dcn302.
 
-DCN3_02 = dcn302_init.o dcn302_resource.o
+DCN3_02 = dcn302_init.o
 
 AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
@@ -6,7 +6,7 @@
 #
 # Makefile for dcn303.
 
-DCN3_03 = dcn303_init.o dcn303_resource.o
+DCN3_03 = dcn303_init.o
 
 AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
@@ -10,8 +10,8 @@
 #
 # Makefile for dcn31.
 
-DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
-	dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+DCN31 = dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
+	dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
 	dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
 	dcn31_afmt.o dcn31_vpg.o
 
@@ -10,8 +10,8 @@
 #
 # Makefile for dcn314.
 
-DCN314 = dcn314_resource.o dcn314_init.o \
-	dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
+DCN314 = dcn314_init.o \
+	dcn314_dio_stream_encoder.o dcn314_dccg.o
 
 AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
@@ -1,30 +0,0 @@
-#
-# Copyright © 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn315.
-
-DCN315 = dcn315_resource.o
-
-AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN315)
@@ -1,30 +0,0 @@
-#
-# Copyright 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn316.
-
-DCN316 = dcn316_resource.o
-
-AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN316)
@@ -10,10 +10,10 @@
 #
 # Makefile for dcn32.
 
-DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
-	dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \
-	dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \
-	dcn32_resource_helpers.o dcn32_mpc.o
+DCN32 = dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
+	dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+	dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
+	dcn32_hpo_dp_link_encoder.o
 
 AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32))
@@ -24,7 +24,7 @@
  */
 
 // header file of functions being implemented
-#include "dcn32_resource.h"
+#include "dcn32/dcn32_resource.h"
 #include "dcn20/dcn20_resource.h"
 #include "dml/dcn32/display_mode_vba_util_32.h"
 #include "dml/dcn32/dcn32_fpu.h"
@@ -665,6 +665,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt
 	return is_native_scaling;
 }
 
+/**
+ * disallow_subvp_in_active_plus_blank() - Function to determine disallowed subvp + drr/vblank configs
+ *
+ * @pipe: subvp pipe to be used for the subvp + drr/vblank config
+ *
+ * Since subvp is being enabled on more configs (such as 1080p60), we want
+ * to explicitly block any configs that we don't want to enable. We do not
+ * want to enable any 1080p60 (SubVP) + drr / vblank configs since these
+ * are already covered by FPO.
+ *
+ * Return: True if disallowed, false otherwise
+ */
+static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe)
+{
+	bool disallow = false;
+
+	if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+			resource_is_pipe_type(pipe, DPP_PIPE)) {
+		if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920)
+			disallow = true;
+	}
+	return disallow;
+}
+
 /**
  * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
  *
@@ -688,6 +712,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 	bool drr_pipe_found = false;
 	bool drr_psr_capable = false;
 	uint64_t refresh_rate = 0;
+	bool subvp_disallow = false;
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -697,6 +722,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
 			subvp_count++;
 
+			subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
 			refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
 				pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
 			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -713,7 +739,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 		}
 	}
 
-	if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+	if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
 		((uint32_t)refresh_rate < 120))
 		result = true;
 
@@ -746,6 +772,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
 	bool vblank_psr_capable = false;
 	uint64_t refresh_rate = 0;
+	bool subvp_disallow = false;
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -755,6 +782,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
 			subvp_count++;
 
+			subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
 			refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
 				pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
 			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -772,7 +800,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 	}
 
 	if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
-		((uint32_t)refresh_rate < 120) &&
+		((uint32_t)refresh_rate < 120) && !subvp_disallow &&
 		vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
 		result = true;
 
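Both admissibility checks above compute the refresh rate the same way: a round-up division of the pixel clock by the frame size, kept in 64-bit arithmetic and split into two div_u64() steps. A standalone sketch of that calculation, in plain C with a hypothetical `timing` struct standing in for the DC timing fields::

    #include <stdint.h>

    /* Hypothetical stand-in for the dc_crtc_timing fields used above. */
    struct timing {
            uint32_t pix_clk_100hz; /* pixel clock in units of 100 Hz */
            uint32_t v_total;
            uint32_t h_total;
    };

    /* Refresh rate in Hz, rounded up: adding (v_total * h_total - 1) before
     * dividing implements a ceiling division, and dividing by v_total and
     * h_total separately mirrors the two div_u64() calls in the driver. */
    static uint32_t refresh_rate_hz(const struct timing *t)
    {
            uint64_t rate = (uint64_t)t->pix_clk_100hz * 100 +
                            (uint64_t)t->v_total * t->h_total - 1;

            rate /= t->v_total;
            rate /= t->h_total;
            return (uint32_t)rate;
    }

For a 2200x1125 total 1080p timing with a 148.5 MHz pixel clock this yields 60, which the checks then compare against the 120 Hz cutoff.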
@@ -10,7 +10,7 @@
 #
 # Makefile for dcn321.
 
-DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o
+DCN321 = dcn321_dio_link_encoder.o
 
 AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321))
@@ -10,9 +10,9 @@
 #
 # Makefile for DCN35.
 
-DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \
-	dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \
-	dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \
+DCN35 = dcn35_init.o dcn35_dio_stream_encoder.o \
+	dcn35_dio_link_encoder.o dcn35_dccg.o \
+	dcn35_hubp.o dcn35_hubbub.o \
 	dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
 
 AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
@@ -256,6 +256,21 @@ static void dccg35_set_dtbclk_dto(
 	if (params->ref_dtbclk_khz && req_dtbclk_khz) {
 		uint32_t modulo, phase;
 
+		switch (params->otg_inst) {
+		case 0:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1);
+			break;
+		case 1:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1);
+			break;
+		case 2:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1);
+			break;
+		case 3:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1);
+			break;
+		}
+
 		// phase / modulo = dtbclk / dtbclk ref
 		modulo = params->ref_dtbclk_khz * 1000;
 		phase = req_dtbclk_khz * 1000;
@@ -280,6 +295,21 @@ static void dccg35_set_dtbclk_dto(
 		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
 				PIPE_DTO_SRC_SEL[params->otg_inst], 2);
 	} else {
+		switch (params->otg_inst) {
+		case 0:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
+			break;
+		case 1:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0);
+			break;
+		case 2:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0);
+			break;
+		case 3:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0);
+			break;
+		}
+
 		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
 				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
 				PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
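The new switch statements bracket the DTO programming: DTBCLK_Pn_GATE_DISABLE is set to 1 (clock ungated) for the active OTG instance while the DTO is in use, and back to 0 (gating allowed) when it is torn down. The DTO itself follows the relation in the retained comment, phase / modulo = dtbclk / dtbclk ref; since both values are scaled from kHz to Hz by the same factor, the ratio is exact. A minimal sketch of the arithmetic, with example clock values not taken from the diff::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example values only: 600 MHz DTBCLK reference, 297 MHz target. */
            uint32_t ref_dtbclk_khz = 600000;
            uint32_t req_dtbclk_khz = 297000;

            /* As in dccg35_set_dtbclk_dto(): both terms scaled to Hz, so
             * phase / modulo == req_dtbclk / ref_dtbclk exactly. */
            uint32_t modulo = ref_dtbclk_khz * 1000;
            uint32_t phase = req_dtbclk_khz * 1000;

            printf("dtbclk = ref * phase / modulo = %.0f kHz\n",
                   (double)ref_dtbclk_khz * phase / modulo);
            return 0;
    }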
@@ -34,6 +34,7 @@
 #define DCCG_REG_LIST_DCN35() \
 	DCCG_REG_LIST_DCN314(),\
 	SR(DPPCLK_CTRL),\
+	SR(DCCG_GATE_DISABLE_CNTL5),\
 	SR(DCCG_GATE_DISABLE_CNTL6),\
 	SR(DCCG_GLOBAL_FGCG_REP_CNTL),\
 	SR(SYMCLKA_CLOCK_ENABLE),\
@@ -174,7 +175,11 @@
 	DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\
 	DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\
 	DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\
-	DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh)
+	DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\
+	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+	DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
 
 struct dccg *dccg35_create(
 	struct dc_context *ctx,
@@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
 	uint32_t power_gate = power_on ? 0 : 1;
 	uint32_t pwr_status = power_on ? 0 : 2;
 	uint32_t org_ip_request_cntl;
+	uint32_t power_forceon;
 	bool block_enabled;
 
 	if (pg_cntl->ctx->dc->debug.ignore_pg ||
@@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
 		return;
 	}
 
+	REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
+	if (power_forceon)
+		return;
+
 	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
 	if (org_ip_request_cntl == 0)
 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
 	uint32_t power_gate = power_on ? 0 : 1;
 	uint32_t pwr_status = power_on ? 0 : 2;
 	uint32_t org_ip_request_cntl;
+	uint32_t power_forceon;
 	bool block_enabled;
 
 	if (pg_cntl->ctx->dc->debug.ignore_pg ||
@@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
 		return;
 	}
 
+	REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
+	if (power_forceon)
+		return;
+
 	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
 	if (org_ip_request_cntl == 0)
 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -332,13 +342,6 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
 	pg_cntl->pg_res_enable[PG_DCIO] = power_on;
 }
 
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on)
-{
-	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
-
-	REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0);
-}
-
 static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
 {
 	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
@@ -508,8 +511,7 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
 	.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
 	.opp_pg_control = pg_cntl35_opp_pg_control,
 	.optc_pg_control = pg_cntl35_optc_pg_control,
-	.dwb_pg_control = pg_cntl35_dwb_pg_control,
-	.set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22
+	.dwb_pg_control = pg_cntl35_dwb_pg_control
 };
 
 struct pg_cntl *pg_cntl35_create(
@@ -183,7 +183,6 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
 	unsigned int optc_inst, bool power_on);
 void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on);
 void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl);
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on);
 
 struct pg_cntl *pg_cntl35_create(
 	struct dc_context *ctx,
@@ -103,10 +103,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
 /*
  * Sends ALLOCATE_PAYLOAD message.
  */
-bool dm_helpers_dp_mst_send_payload_allocation(
+void dm_helpers_dp_mst_send_payload_allocation(
 		struct dc_context *ctx,
-		const struct dc_stream_state *stream,
-		bool enable);
+		const struct dc_stream_state *stream);
+
+/*
+ * Update mst manager relevant variables
+ */
+void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+		struct dc_context *ctx,
+		const struct dc_stream_state *stream);
 
 bool dm_helpers_dp_mst_start_top_mgr(
 		struct dc_context *ctx,
@@ -30,7 +30,7 @@
 #include "dcn_calc_auto.h"
 #include "dal_asic_id.h"
 #include "resource.h"
-#include "dcn10/dcn10_resource.h"
+#include "resource/dcn10/dcn10_resource.h"
 #include "dcn10/dcn10_hubbub.h"
 #include "dml/dml1_display_rq_dlg_calc.h"
 
@@ -36,7 +36,7 @@
  * Define the maximum amount of states supported by the ASIC. Every ASIC has a
  * specific number of states; this macro defines the maximum number of states.
  */
-#define DC__VOLTAGE_STATES 20
+#define DC__VOLTAGE_STATES 40
 #define DC__NUM_DPP__4 1
 #define DC__NUM_DPP__0_PRESENT 1
 #define DC__NUM_DPP__1_PRESENT 1
@@ -950,10 +950,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
 {
 	int plane_count;
 	int i;
-	unsigned int min_dst_y_next_start_us;
 
 	plane_count = 0;
-	min_dst_y_next_start_us = 0;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		if (context->res_ctx.pipe_ctx[i].plane_state)
 			plane_count++;
@@ -975,26 +973,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
 	else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
 		struct dc_link *link = context->streams[0]->sink->link;
 		struct dc_stream_status *stream_status = &context->stream_status[0];
-		struct dc_stream_state *current_stream = context->streams[0];
 		int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
 		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
 		bool is_pwrseq0 = link->link_index == 0;
-		bool isFreesyncVideo;
-
-		isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
-		isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
-				min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
-				break;
-			}
-		}
 
 		/* Don't support multi-plane configurations */
 		if (stream_status->plane_count > 1)
 			return DCN_ZSTATE_SUPPORT_DISALLOW;
 
-		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
+		if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
 			return DCN_ZSTATE_SUPPORT_ALLOW;
 		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
 			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -45,6 +45,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = {
 		{.width = 1920, .height = 1080, }},
 };
 
+static const struct subvp_active_margin_list subvp_active_margin_list = {
+	.min_refresh = 55,
+	.max_refresh = 65,
+	.res = {
+		{.width = 2560, .height = 1440, },
+		{.width = 1920, .height = 1080, }},
+};
+
 struct _vcs_dpi_ip_params_st dcn3_2_ip = {
 	.gpuvm_enable = 0,
 	.gpuvm_max_page_table_levels = 4,
@@ -1192,13 +1200,16 @@ static bool update_pipe_slice_table_with_split_flags(
 	 */
 	struct pipe_ctx *pipe;
 	bool odm;
-	int i;
+	int dc_pipe_idx, dml_pipe_idx = 0;
 	bool updated = false;
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		pipe = &context->res_ctx.pipe_ctx[i];
+	for (dc_pipe_idx = 0;
+			dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
+		pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
 		if (resource_is_pipe_type(pipe, FREE_PIPE))
 			continue;
 
-		if (merge[i]) {
+		if (merge[dc_pipe_idx]) {
 			if (resource_is_pipe_type(pipe, OPP_HEAD))
 				/* merging OPP head means reducing ODM slice
 				 * count by 1
@@ -1213,17 +1224,18 @@ static bool update_pipe_slice_table_with_split_flags(
 			updated = true;
 		}
 
-		if (split[i]) {
-			odm = vba->ODMCombineEnabled[vba->pipe_plane[i]] !=
+		if (split[dc_pipe_idx]) {
+			odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
 					dm_odm_combine_mode_disabled;
 			if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
 				update_slice_table_for_stream(
-						table, pipe->stream, split[i] - 1);
+						table, pipe->stream, split[dc_pipe_idx] - 1);
 			else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
 				update_slice_table_for_plane(table, pipe,
-						pipe->plane_state, split[i] - 1);
+						pipe->plane_state, split[dc_pipe_idx] - 1);
 			updated = true;
 		}
+		dml_pipe_idx++;
 	}
 	return updated;
 }
@@ -1233,15 +1245,11 @@ static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *contex
 {
 	int i;
 
-	for (i = 0; i < table->odm_combine_count; i++) {
+	for (i = 0; i < table->odm_combine_count; i++)
 		resource_update_pipes_for_stream_with_slice_count(context,
 				dc->current_state, dc->res_pool,
 				table->odm_combines[i].stream,
 				table->odm_combines[i].slice_count);
-		/* TODO: move this into the function above */
-		dcn20_build_mapped_resource(dc, context,
-				table->odm_combines[i].stream);
-	}
 
 	for (i = 0; i < table->mpc_combine_count; i++)
 		resource_update_pipes_for_plane_with_slice_count(context,
@@ -1408,6 +1416,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 	unsigned int dc_pipe_idx = 0;
 	int i = 0;
 	bool found_supported_config = false;
+	int vlevel_temp = 0;
 
 	dc_assert_fp_enabled();
 
@@ -1440,13 +1449,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 	 */
 	if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
 	    !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) &&
-	    (*vlevel == context->bw_ctx.dml.soc.num_states ||
-	     vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
+	    (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] &&
+	     vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) ||
 	     dc->debug.force_subvp_mclk_switch)) {
 
 		dcn32_merge_pipes_for_subvp(dc, context);
 		memset(merge, 0, MAX_PIPES * sizeof(bool));
 
+		vlevel_temp = *vlevel;
 		/* to re-initialize viewport after the pipe merge */
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1515,6 +1526,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 			}
 		}
 
+		if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp])
+			found_supported_config = false;
+
 		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
 		// remove phantom pipes and repopulate dml pipes
 		if (!found_supported_config) {
@@ -2231,6 +2245,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	int i, pipe_idx, vlevel_temp = 0;
 	double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
 	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+	double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
 	double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
 			dm_dram_clock_change_unsupported;
@@ -2418,7 +2433,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	}
 
 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-		min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+		min_dram_speed_mts = dram_speed_from_validation;
 		min_dram_speed_mts_margin = 160;
 
 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
@@ -3294,25 +3309,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
 {
 	bool allow = false;
 	uint32_t refresh_rate = 0;
+	uint32_t min_refresh = subvp_active_margin_list.min_refresh;
+	uint32_t max_refresh = subvp_active_margin_list.max_refresh;
+	uint32_t i;
 
 	/* Allow subvp on displays that have active margin for 2560x1440@60hz displays
 	 * only for now. There must be no scaling as well.
 	 *
 	 * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs
 	 * for p-state switching.
 	 */
 	if (pipe->stream && pipe->plane_state) {
-		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
-				pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
-				/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
-		if (pipe->stream->timing.v_addressable == 1440 &&
-				pipe->stream->timing.h_addressable == 2560 &&
-				refresh_rate >= 55 && refresh_rate <= 65 &&
-				pipe->plane_state->src_rect.height == 1440 &&
-				pipe->plane_state->src_rect.width == 2560 &&
-				pipe->plane_state->dst_rect.height == 1440 &&
-				pipe->plane_state->dst_rect.width == 2560)
+		for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) {
+			uint32_t width = subvp_active_margin_list.res[i].width;
+			uint32_t height = subvp_active_margin_list.res[i].height;
+
+			refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+				pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+			if (refresh_rate >= min_refresh && refresh_rate <= max_refresh &&
+					dcn32_check_native_scaling_for_res(pipe, width, height)) {
 			allow = true;
+				break;
+			}
+		}
+	}
 	return allow;
 }
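The rewrite above replaces a hard-coded 2560x1440 check with a lookup over subvp_active_margin_list, so new resolutions only need a table entry. A minimal sketch of the same table-driven shape (simplified: it compares bare width/height, where the driver calls dcn32_check_native_scaling_for_res() to also rule out scaling)::

    #include <stdbool.h>
    #include <stdint.h>

    struct res_entry {
            uint32_t width;
            uint32_t height;
    };

    /* Table-driven admission: a mode qualifies when its refresh rate falls
     * in the [min, max] window and its resolution matches a table entry. */
    static bool allow_with_active_margin(uint32_t refresh_hz,
                                         uint32_t width, uint32_t height,
                                         const struct res_entry *table, int entries,
                                         uint32_t min_refresh, uint32_t max_refresh)
    {
            int i;

            if (refresh_hz < min_refresh || refresh_hz > max_refresh)
                    return false;
            for (i = 0; i < entries; i++)
                    if (width == table[i].width && height == table[i].height)
                            return true;
            return false;
    }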
@@ -164,10 +164,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
 		},
 	},
 	.num_states = 5,
-	.sr_exit_time_us = 9.0,
-	.sr_enter_plus_exit_time_us = 11.0,
-	.sr_exit_z8_time_us = 50.0, /*changed from 442.0*/
-	.sr_enter_plus_exit_z8_time_us = 50.0,/*changed from 560.0*/
+	.sr_exit_time_us = 14.0,
+	.sr_enter_plus_exit_time_us = 16.0,
+	.sr_exit_z8_time_us = 525.0,
+	.sr_enter_plus_exit_z8_time_us = 715.0,
 	.fclk_change_latency_us = 20.0,
 	.usr_retraining_latency_us = 2,
 	.writeback_latency_us = 12.0,
@@ -329,6 +329,48 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
 	/*temp till dml2 fully work without dml1*/
 	dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
 				DML_PROJECT_DCN31);
+
+	/*copy to dml2, before dml2_create*/
+	if (clk_table->num_entries > 2) {
+
+		for (i = 0; i < clk_table->num_entries; i++) {
+			dc->dml2_options.bbox_overrides.clks_table.num_states =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+				clock_limits[i].dcfclk_mhz;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+				clock_limits[i].fabricclk_mhz;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+				clock_limits[i].dispclk_mhz;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+				clock_limits[i].dppclk_mhz;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+				clock_limits[i].socclk_mhz;
+			dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+				clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+				clk_table->num_entries;
+			dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+				clk_table->num_entries;
+		}
+	}
+
+	/* Update latency values */
+	dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_5_soc.dram_clock_change_latency_us;
+
+	dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_5_soc.sr_exit_time_us;
+	dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_5_soc.sr_enter_plus_exit_time_us;
+
+	dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_5_soc.sr_exit_z8_time_us;
+	dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_5_soc.sr_enter_plus_exit_z8_time_us;
 }
 
 static bool is_dual_plane(enum surface_pixel_format format)
@@ -507,3 +549,37 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
 
 	return pipe_cnt;
 }
+
+void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+	enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+	unsigned int i, plane_count = 0;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		if (context->res_ctx.pipe_ctx[i].plane_state)
+			plane_count++;
+	}
+
+	if (plane_count == 0) {
+		support = DCN_ZSTATE_SUPPORT_ALLOW;
+	} else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+		struct dc_link *link = context->streams[0]->sink->link;
+		bool is_pwrseq0 = link && link->link_index == 0;
+		bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
+		int minmum_z8_residency =
+			dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+		int minmum_z10_residency =
+			dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
+		bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
+
+		if (is_pwrseq0 && allow_z10)
+			support = DCN_ZSTATE_SUPPORT_ALLOW;
+		else if (is_pwrseq0 && is_psr1)
+			support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+		else if (allow_z8)
+			support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+	}
+
+	context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
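The decision tree in dcn35_decide_zstate_support() above boils down to residency thresholds against the DML stutter period. A condensed restatement with standalone types, using the defaults of 1000 us for Z8 and 5000 us for Z10 as in the code (both overridable via the debug knobs)::

    #include <stdbool.h>

    enum zstate {
            ZSTATE_DISALLOW,
            ZSTATE_ALLOW,
            ZSTATE_ALLOW_Z8_ONLY,
            ZSTATE_ALLOW_Z8_Z10_ONLY,
            ZSTATE_ALLOW_Z10_ONLY,
    };

    /* Condensed form of the single-plane eDP branch: deeper states require
     * the stutter period to beat the corresponding residency floor, and only
     * the pipe on power sequencer 0 may enter them; PSR1 panels get the
     * Z8/Z10-only variants instead of full Z-state entry. */
    static enum zstate decide_zstate(bool is_pwrseq0, bool is_psr1,
                                     double stutter_period_us,
                                     double min_z8_us, double min_z10_us)
    {
            bool allow_z8 = stutter_period_us > min_z8_us;
            bool allow_z10 = stutter_period_us > min_z10_us;

            if (is_pwrseq0 && allow_z10)
                    return ZSTATE_ALLOW;
            if (is_pwrseq0 && is_psr1)
                    return allow_z8 ? ZSTATE_ALLOW_Z8_Z10_ONLY : ZSTATE_ALLOW_Z10_ONLY;
            if (allow_z8)
                    return ZSTATE_ALLOW_Z8_ONLY;
            return ZSTATE_DISALLOW;
    }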
@@ -39,4 +39,6 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
 		display_e2e_pipe_params_st *pipes,
 		bool fast_validate);
 
+void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
+
 #endif
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
 				mode_lib->ms.NoOfDPPThisState,
 				mode_lib->ms.dpte_group_bytes,
 				s->HostVMInefficiencyFactor,
-				mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+				mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
 				mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
 			s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
 					mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
 					mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
 					mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-					mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+					mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
 					mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
 					mode_lib->ms.MetaRowBytes[j][k],
 					mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
 	CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
 	CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
 	CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-	CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+	CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
 	CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
 	CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
 	CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
 		UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
 		UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
 		UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
-		UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+		UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
 		UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
 		UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
 		UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
 	CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
 	CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
 	CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-	CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+	CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
 	CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
 	CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
 	CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
 			mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
 			locals->dpte_group_bytes,
 			s->HostVMInefficiencyFactor,
-			mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+			mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
 			mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
 	locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
 		CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
 		CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
 		CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-		CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+		CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
 		CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
 		CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
 		CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
 				mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
 				mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
 				mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-				mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+				mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
 				locals->PDEAndMetaPTEBytesFrame[k],
 				locals->MetaRowByte[k],
 				locals->PixelPTEBytesPerRow[k],
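Every hunk in this group is the same unit fix: hostvm_min_page_size_kbytes is stored in kilobytes, while the HostVMMinPageSize parameters it feeds are consumed in bytes, hence the uniform scaling. In sketch form::

    /* hostvm_min_page_size_kbytes holds KiB; the DML HostVMMinPageSize
     * parameters expect bytes, so every call site now scales by 1024. */
    static unsigned int hostvm_min_page_size_bytes(unsigned int kbytes)
    {
            return kbytes * 1024;
    }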
@@ -772,18 +772,29 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
 		const struct dc_state *state,
 		const struct dml_display_cfg_st *disp_cfg,
 		struct dml2_dml_to_dc_pipe_mapping *mapping,
-		const struct dc_stream_status *status, unsigned int stream_id,
+		const struct dc_stream_status *status,
+		const struct dc_stream_state *stream,
 		int plane_idx)
 {
 	unsigned int plane_id;
 	unsigned int cfg_idx;
+	unsigned int mpc_factor;
 
-	get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id);
+	get_plane_id(ctx, state, status->plane_states[plane_idx],
+			stream->stream_id, plane_idx, &plane_id);
 	cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
-	if (ctx->architecture == dml2_architecture_20)
-		return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
-	ASSERT(false);
-	return 1;
+	if (ctx->architecture == dml2_architecture_20) {
+		mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
+	} else {
+		mpc_factor = 1;
+		ASSERT(false);
+	}
+
+	/* For stereo timings, we need to pipe split */
+	if (dml2_is_stereo_timing(stream))
+		mpc_factor = 2;
+
+	return mpc_factor;
 }
 
 static unsigned int get_odm_factor(
@@ -820,14 +831,13 @@ static void populate_mpc_factors_for_stream(
 		unsigned int mpc_factors[MAX_PIPES])
 {
 	const struct dc_stream_status *status = &state->stream_status[stream_idx];
-	unsigned int stream_id = state->streams[stream_idx]->stream_id;
 	int i;
 
 	for (i = 0; i < status->plane_count; i++)
 		if (odm_factor == 1)
 			mpc_factors[i] = get_mpc_factor(
 					ctx, state, disp_cfg, mapping, status,
-					stream_id, i);
+					state->streams[stream_idx], i);
 		else
 			mpc_factors[i] = 1;
 }
Some files were not shown because too many files have changed in this diff.