Merge tag 'amd-drm-next-5.12-2021-02-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.12-2021-02-18:

amdgpu:
- Prefer Bhawan's unused variable fix
- Fixes for high priority queues on gfx8,9
- swSMU fixes for sienna cichlid
- swSMU fixes for renoir
- mmhub client id fixes for arcturus
- SMUIO fixes for navi family
- swSMU fixes for vangogh
- GPU reset cleanup
- Display fixes
- GFX harvesting fix for sienna cichlid
- Fix reference clock on Renoir
- Misc fixes and cleanups

amdkfd:
- Fix for unique id query
- Fix recursive lock warnings

radeon:
- Remove confusing VCE messages on Oland

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210218221531.3870-1-alexander.deucher@amd.com
@@ -171,7 +171,8 @@ amdgpu-y += \
 # add SMUIO block
 amdgpu-y += \
 	smuio_v9_0.o \
-	smuio_v11_0.o
+	smuio_v11_0.o \
+	smuio_v11_0_6.o

 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
@@ -287,7 +287,7 @@ enum amdgpu_kiq_irq {

 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000

 int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,

@@ -579,7 +579,8 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE0,
 	AMD_RESET_METHOD_MODE1,
 	AMD_RESET_METHOD_MODE2,
-	AMD_RESET_METHOD_BACO
+	AMD_RESET_METHOD_BACO,
+	AMD_RESET_METHOD_PCI,
 };

 /*

@@ -891,6 +892,7 @@ struct amdgpu_device {
 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
+	struct amdgpu_irq_src		vline0_irq;
 	struct amdgpu_irq_src		vupdate_irq;
 	struct amdgpu_irq_src		pageflip_irq;
 	struct amdgpu_irq_src		hpd_irq;

@@ -1227,6 +1229,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);

 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,

@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
 	return 0;

 error_free:
-	if (info)
-		kvfree(info);
+	kvfree(info);

 	return r;
 }

@@ -929,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 }

+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+	return pci_reset_function(adev->pdev);
+}
+
 /*
  * GPU doorbell aperture helpers function.
  */

@@ -1433,10 +1445,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		amdgpu_device_resume(dev, true);

 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
-		drm_kms_helper_poll_enable(dev);
 	} else {
 		pr_info("switched off\n");
-		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_suspend(dev, true);
 		amdgpu_device_cache_pci_state(pdev);

@@ -3724,7 +3734,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)

 	r = amdgpu_device_ip_suspend_phase1(adev);

-	amdgpu_amdkfd_suspend(adev, !fbcon);
+	amdgpu_amdkfd_suspend(adev, adev->in_runpm);

 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);

@@ -3808,7 +3818,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev, !fbcon);
+	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
 	if (r)
 		return r;

@@ -4211,6 +4221,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 	case CHIP_NAVI12:
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
+	case CHIP_DIMGREY_CAVEFISH:
 		break;
 	default:
 		goto disabled;

@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
 uint amdgpu_force_long_training;
 int amdgpu_job_hang_limit;
 int amdgpu_lbpw = -1;

@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);

 /**
  * DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
  */
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);

 /**
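
With the enum and module-parameter plumbing above in place, the new method is selected like any other amdgpu option. A minimal usage sketch, not part of the patch itself, assuming amdgpu is built as a module:

    modprobe amdgpu reset_method=5    # 5 = generic PCI reset (FLR/SBR, per the kdoc above)

For a built-in driver the equivalent is amdgpu.reset_method=5 on the kernel command line.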
@@ -1344,11 +1348,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	adev->in_runpm = true;
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-	drm_kms_helper_poll_disable(drm_dev);

 	ret = amdgpu_device_suspend(drm_dev, false);
-	if (ret)
+	if (ret) {
+		adev->in_runpm = false;
 		return ret;
+	}

 	if (amdgpu_device_supports_atpx(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX

@@ -1401,7 +1406,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
-	drm_kms_helper_poll_enable(drm_dev);
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;

@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 }

 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue)
+					       struct amdgpu_ring *ring)
 {
-	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-	int cond;
-	/* Policy: alternate between normal and high priority */
-	cond = multipipe_policy ? pipe : queue;
-
-	return ((cond % 2) != 0);
+	/* Policy: use 1st queue as high priority compute queue if we
+	 * have more than one compute queue.
+	 */
+	if (adev->gfx.num_compute_rings > 1 &&
+	    ring == &adev->gfx.compute_ring[0])
+		return true;
+
+	return false;
 }

 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
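
To make the policy change concrete, here is a toy, self-contained C model of the two selection rules. The names multipipe, pipe, queue and ring_index are my stand-ins for the real adev/ring state, not driver types. The old rule alternated priority across pipes or queues; the new rule reserves exactly the first compute ring, and only when a second one exists to absorb normal-priority work:

    #include <stdbool.h>
    #include <stdio.h>

    static bool old_policy(bool multipipe, int pipe, int queue)
    {
        int cond = multipipe ? pipe : queue;   /* alternate by pipe or by queue */
        return (cond % 2) != 0;
    }

    static bool new_policy(int num_compute_rings, int ring_index)
    {
        /* ring_index == 0 models ring == &adev->gfx.compute_ring[0] */
        return num_compute_rings > 1 && ring_index == 0;
    }

    int main(void)
    {
        for (int q = 0; q < 4; q++)
            printf("queue %d: old=%d new=%d\n", q,
                   old_policy(false, 0, q), new_policy(4, q));
        return 0;
    }

Under the old rule every odd queue was high priority; under the new rule only queue 0 is.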
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
 				     int pipe, int queue);
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue);
+					       struct amdgpu_ring *ring);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
 			       int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,

@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
 		ring->funcs->emit_mem_sync(ring);

+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, true);
+
 	if (ring->funcs->insert_start)
 		ring->funcs->insert_start(ring);

@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	ring->current_ctx = fence_ctx;
 	if (vm && ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
+
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, false);
+
 	amdgpu_ring_commit(ring);
 	return 0;
 }

@@ -88,6 +88,7 @@ struct amdgpu_nbio_funcs {
 	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*enable_aspm)(struct amdgpu_device *adev,
 			    bool enable);
+	void (*program_aspm)(struct amdgpu_device *adev);
 };

 struct amdgpu_nbio {

@@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
 		     unsigned int irq_type, unsigned int hw_prio)
 {
-	int r, i;
+	int r;
 	int sched_hw_submission = amdgpu_sched_hw_submission;
 	u32 *num_sched;
 	u32 hw_ip;

@@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}

 	ring->max_dw = max_dw;
-	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
-	mutex_init(&ring->priority_mutex);
+	ring->hw_prio = hw_prio;

 	if (!ring->no_scheduler) {
 		hw_ip = ring->funcs->type;

@@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			&ring->sched;
 	}

-	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
-		atomic_set(&ring->num_jobs[i], 0);
-
 	return 0;
 }

@@ -197,6 +197,7 @@ struct amdgpu_ring_funcs {
 	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
 	int (*preempt_ib)(struct amdgpu_ring *ring);
 	void (*emit_mem_sync)(struct amdgpu_ring *ring);
+	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
 };

 struct amdgpu_ring {

@@ -242,11 +243,7 @@ struct amdgpu_ring {
 	struct dma_fence	*vmid_wait;
 	bool			has_compute_vm_bug;
 	bool			no_scheduler;
-
-	atomic_t		num_jobs[DRM_SCHED_PRIORITY_COUNT];
-	struct mutex		priority_mutex;
-	/* protected by priority_mutex */
-	int			priority;
 	int			hw_prio;

 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;

@@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,

 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
 {
-	struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+	struct amdgpu_hive_info *hive = NULL;
 	int ret;

 	if (!adev->gmc.xgmi.hive_id)

@@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)

 	mutex_lock(&xgmi_mutex);

-	if (!list_empty(&xgmi_hive_list)) {
-		list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
-			if (hive->hive_id == adev->gmc.xgmi.hive_id)
-				goto pro_end;
-		}
-	}
+	list_for_each_entry(hive, &xgmi_hive_list, node) {
+		if (hive->hive_id == adev->gmc.xgmi.hive_id)
+			goto pro_end;
+	}

 	hive = kzalloc(sizeof(*hive), GFP_KERNEL);

@@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
 	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
 }

-static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	struct kv_reset_save_regs kv_save = { 0 };
 	u32 i;
 	int r = -EINVAL;

 	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

 	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);

@@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		kv_restore_regs_for_reset(adev, &kv_save);

+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
 	return r;
 }
-
-/**
- * cik_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = cik_gpu_pci_config_reset(adev);
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
-	return r;

@@ -70,6 +70,11 @@
 #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT	0x8
 #define GB_ADDR_CONFIG__NUM_PKRS_MASK	0x00000700L

+#define mmCGTS_TCC_DISABLE_gc_10_3	0x5006
+#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX	1
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3	0x5007
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX	1
+
 #define mmCP_MEC_CNTL_Sienna_Cichlid	0x0f55
 #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX	0
 #define mmRLC_SAFE_MODE_Sienna_Cichlid	0x4ca0

@@ -98,10 +103,6 @@
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid	0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX	0

-#define mmCGTS_TCC_DISABLE_Vangogh	0x5006
-#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX	1
-#define mmCGTS_USER_TCC_DISABLE_Vangogh	0x5007
-#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX	1
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh	0x0025
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX	1
 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh	0x0026

@@ -4491,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,

@@ -4939,15 +4939,12 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
 	/* TCCs are global (not instanced). */
 	uint32_t tcc_disable;

-	switch (adev->asic_type) {
-	case CHIP_VANGOGH:
-		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
-			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
-		break;
-	default:
+	if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
+			RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
+	} else {
 		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
-			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
-		break;
+			RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
 	}

 	adev->gfx.config.tcc_disabled_mask =

@@ -6544,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
 	struct amdgpu_device *adev = ring->adev;

 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;

@@ -29,6 +29,7 @@

 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_ring.h"
 #include "vi.h"
 #include "vi_structs.h"
 #include "vid.h"

@@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;

-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,

@@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
 	struct amdgpu_device *adev = ring->adev;

 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;

@@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
 }

+
+/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
+#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT	0x0000007f
+static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+					uint32_t pipe, bool enable)
+{
+	uint32_t val;
+	uint32_t wcl_cs_reg;
+
+	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
+
+	switch (pipe) {
+	case 0:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
+		break;
+	case 1:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
+		break;
+	case 2:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
+		break;
+	case 3:
+		wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
+		break;
+	default:
+		DRM_DEBUG("invalid pipe %d\n", pipe);
+		return;
+	}
+
+	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+
+}
+
+#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT	0x07ffffff
+static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	int i;
+
+	/* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
+	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
+	 * around 25% of gpu resources.
+	 */
+	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only 1st ME(0-3 CS pipes).
+	 */
+	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+		if (i != ring->pipe)
+			gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
+
+	}
+
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,

@@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
 		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
 		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-		7, /* gfx_v8_0_emit_mem_sync_compute */
+		7 + /* gfx_v8_0_emit_mem_sync_compute */
+		5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
 	.emit_ib_size =	7, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,

@@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
+	.emit_wave_limit = gfx_v8_0_emit_wave_limit,
 };

 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
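
A quick check on the "around 25%" claim in the wave-limit comment above; the arithmetic is mine, not from the patch. The SPI_WCL_PIPE_PERCENT fields are 7-bit multipliers whose CS default is 0x7f, so the 0x1f written while a high-priority queue is active works out to:

    #include <stdio.h>
    int main(void)
    {
        printf("%.1f%%\n", 100.0 * 0x1f / 0x7f);   /* ~24.4%, i.e. "around 25%" */
        return 0;
    }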
@@ -52,6 +52,7 @@

 #include "asic_reg/pwr/pwr_10_0_offset.h"
 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"

 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096

@@ -2227,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
-	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
-							    ring->queue) ?
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	return amdgpu_ring_init(adev, ring, 1024,

@@ -3390,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
 	struct amdgpu_device *adev = ring->adev;

 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-		if (amdgpu_gfx_is_high_priority_compute_queue(adev,
-							      ring->pipe,
-							      ring->queue)) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;

@@ -6670,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
 }

+static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+					uint32_t pipe, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	uint32_t wcl_cs_reg;
+
+	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
+	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
+
+	switch (pipe) {
+	case 0:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
+		break;
+	case 1:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
+		break;
+	case 2:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
+		break;
+	case 3:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
+		break;
+	default:
+		DRM_DEBUG("invalid pipe %d\n", pipe);
+		return;
+	}
+
+	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+
+}
+static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	int i;
+
+
+	/* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
+	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
+	 * around 25% of gpu resources.
+	 */
+	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
+			      val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only 1st ME(0-3 CS pipes).
+	 */
+	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+		if (i != ring->pipe)
+			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
+
+	}
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
 	.name = "gfx_v9_0",
 	.early_init = gfx_v9_0_early_init,

@@ -6759,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
 		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
-		7, /* gfx_v9_0_emit_mem_sync */
+		7 + /* gfx_v9_0_emit_mem_sync */
+		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v9_0_ring_emit_fence,

@@ -6775,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
 };

 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {

@@ -239,46 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = {
 };

 static const char *mmhub_client_ids_arcturus[][2] = {
 	[0][0] = "DBGU1",
 	[1][0] = "XDP",
 	[2][0] = "MP1",
 	[3][0] = "MP0",
 	[10][0] = "UTCL2",
 	[13][0] = "OSS",
 	[14][0] = "HDP",
-	[15][0] = "SDMA0",
-	[32+15][0] = "SDMA1",
-	[64+15][0] = "SDMA2",
-	[96+15][0] = "SDMA3",
-	[128+15][0] = "SDMA4",
-	[160+11][0] = "JPEG",
-	[160+12][0] = "VCN",
-	[160+13][0] = "VCNU",
-	[160+15][0] = "SDMA5",
-	[192+10][0] = "UTCL2",
-	[192+11][0] = "JPEG1",
-	[192+12][0] = "VCN1",
-	[192+13][0] = "VCN1U",
-	[192+15][0] = "SDMA6",
-	[224+15][0] = "SDMA7",
+	[171][0] = "JPEG",
+	[172][0] = "VCN",
+	[173][0] = "VCNU",
+	[203][0] = "JPEG1",
+	[204][0] = "VCN1",
+	[205][0] = "VCN1U",
+	[256][0] = "SDMA0",
+	[257][0] = "SDMA1",
+	[258][0] = "SDMA2",
+	[259][0] = "SDMA3",
+	[260][0] = "SDMA4",
+	[261][0] = "SDMA5",
+	[262][0] = "SDMA6",
+	[263][0] = "SDMA7",
+	[384][0] = "OSS",
 	[0][1] = "DBGU1",
 	[1][1] = "XDP",
 	[2][1] = "MP1",
 	[3][1] = "MP0",
 	[13][1] = "OSS",
 	[14][1] = "HDP",
-	[15][1] = "SDMA0",
-	[32+15][1] = "SDMA1",
-	[64+15][1] = "SDMA2",
-	[96+15][1] = "SDMA3",
-	[128+15][1] = "SDMA4",
-	[160+11][1] = "JPEG",
-	[160+12][1] = "VCN",
-	[160+13][1] = "VCNU",
-	[160+15][1] = "SDMA5",
-	[192+11][1] = "JPEG1",
-	[192+12][1] = "VCN1",
-	[192+13][1] = "VCN1U",
-	[192+15][1] = "SDMA6",
-	[224+15][1] = "SDMA7",
+	[171][1] = "JPEG",
+	[172][1] = "VCN",
+	[173][1] = "VCNU",
+	[203][1] = "JPEG1",
+	[204][1] = "VCN1",
+	[205][1] = "VCN1U",
+	[256][1] = "SDMA0",
+	[257][1] = "SDMA1",
+	[258][1] = "SDMA2",
+	[259][1] = "SDMA3",
+	[260][1] = "SDMA4",
+	[261][1] = "SDMA5",
+	[262][1] = "SDMA6",
+	[263][1] = "SDMA7",
+	[384][1] = "OSS",
 };

 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =

@@ -34,6 +34,14 @@
 #define smnCPM_CONTROL		0x11180460
 #define smnPCIE_CNTL2		0x11180070
 #define smnPCIE_LC_CNTL		0x11140280
+#define smnPCIE_LC_CNTL3	0x111402d4
+#define smnPCIE_LC_CNTL6	0x111402ec
+#define smnPCIE_LC_CNTL7	0x111402f0
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2	0x1014008c
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL	0x10123538
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP	0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2		0x111402c4
+#define smnNBIF_MGCG_CTRL_LCLK			0x1013a21c

 #define mmBIF_SDMA2_DOORBELL_RANGE		0x01d6
 #define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX	2

@@ -350,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
 	WREG32_PCIE(smnPCIE_LC_CNTL, data);
 }

+static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
+	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);
+
+	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+	if (def != data)
+		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+	nbio_v2_3_program_ltr(adev);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+	if (def != data)
+		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+	if (def != data)
+		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,

@@ -370,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
 	.init_registers = nbio_v2_3_init_registers,
 	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 	.enable_aspm = nbio_v2_3_enable_aspm,
+	.program_aspm = nbio_v2_3_program_aspm,
 };
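
program_ltr()/program_aspm() above lean on one idiom throughout: snapshot a register, adjust its fields, and write back only when something actually changed, so unchanged PCIe config registers are never rewritten. A stripped-down, standalone illustration of the pattern (the accessor stubs and the mask are mine, not amdgpu's):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg = 0x0000ffff;                 /* stand-in register */
    static uint32_t RREG32(void) { return fake_reg; }
    static void WREG32(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", v); }

    #define FIELD_MASK 0x00000700u

    int main(void)
    {
        uint32_t def, data;

        def = data = RREG32();      /* snapshot current value */
        data &= ~FIELD_MASK;        /* clear the field */
        data |= 0x2u << 8;          /* program the new value */
        if (def != data)            /* skip the write when nothing changed */
            WREG32(data);
        return 0;
    }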
@@ -38,7 +38,6 @@

 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
-#include "smuio/smuio_11_0_0_offset.h"
 #include "mp/mp_11_0_offset.h"

 #include "soc15.h"

@@ -61,6 +60,8 @@
 #include "dce_virtual.h"
 #include "mes_v10_1.h"
 #include "mxgpu_nv.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"

 static const struct amd_ip_funcs nv_common_ip_funcs;

@@ -202,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
 {
 	u32 *dw_ptr;
 	u32 i, length_dw;
+	u32 rom_index_offset, rom_data_offset;

 	if (bios == NULL)
 		return false;

@@ -214,11 +216,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
 	dw_ptr = (u32 *)bios;
 	length_dw = ALIGN(length_bytes, 4) / 4;

+	rom_index_offset =
+		adev->smuio.funcs->get_rom_index_offset(adev);
+	rom_data_offset =
+		adev->smuio.funcs->get_rom_data_offset(adev);
+
 	/* set rom index to 0 */
-	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+	WREG32(rom_index_offset, 0);
 	/* read out the rom data */
 	for (i = 0; i < length_dw; i++)
-		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+		dw_ptr[i] = RREG32(rom_data_offset);

 	return true;
 }

@@ -384,7 +391,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)

 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
-	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
 		return amdgpu_reset_method;

 	if (amdgpu_reset_method != -1)

@@ -416,6 +424,10 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 		return 0;

 	switch (nv_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		ret = amdgpu_device_pci_reset(adev);
+		break;
 	case AMD_RESET_METHOD_BACO:
 		dev_info(adev->dev, "BACO reset\n");

@@ -468,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)

 static void nv_program_aspm(struct amdgpu_device *adev)
 {
-	if (amdgpu_aspm == 0)
+	if (amdgpu_aspm != 1)
 		return;

-	/* todo */
+	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+	    !(adev->flags & AMD_IS_APU) &&
+	    (adev->nbio.funcs->program_aspm))
+		adev->nbio.funcs->program_aspm(adev);
+
 }

 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,

@@ -561,6 +576,11 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 	}
 	adev->hdp.funcs = &hdp_v5_0_funcs;

+	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+		adev->smuio.funcs = &smuio_v11_0_6_funcs;
+	else
+		adev->smuio.funcs = &smuio_v11_0_funcs;
+
 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
 		adev->gmc.xgmi.supported = true;

@@ -798,10 +818,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 	 * The ASPM function is not fully enabled and verified on
 	 * Navi yet. Temporarily skip this until ASPM enabled.
 	 */
-#if 0
-	if (adev->nbio.funcs->enable_aspm)
+	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+	    !(adev->flags & AMD_IS_APU) &&
+	    (adev->nbio.funcs->enable_aspm))
 		adev->nbio.funcs->enable_aspm(adev, !enter);
-#endif

 	return 0;
 }

@@ -1129,6 +1149,8 @@ static int nv_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE);
 		adev->hdp.funcs->update_clock_gating(adev,
 				state == AMD_CG_STATE_GATE);
+		adev->smuio.funcs->update_rom_clock_gating(adev,
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;

@@ -1154,6 +1176,8 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)

 	adev->hdp.funcs->get_clock_gating_state(adev, flags);

+	adev->smuio.funcs->get_clock_gating_state(adev, flags);
+
 	return;
 }

@@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
 	u32 i;
 	int r = -EINVAL;

 	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

 	/* set mclk/sclk to bypass */
 	si_set_clk_bypass_mode(adev);

@@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
 		}
 		udelay(1);
 	}

 	return r;
 }

-static int si_asic_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	dev_info(adev->dev, "PCI CONFIG reset\n");
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = si_gpu_pci_config_reset(adev);
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
-	return r;

@@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev)
 static enum amd_reset_method
 si_asic_reset_method(struct amdgpu_device *adev)
 {
-	if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
-	    amdgpu_reset_method != -1)
+	if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
+		return amdgpu_reset_method;
+	else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
+		 amdgpu_reset_method != -1)
 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
-			 amdgpu_reset_method);
+			 amdgpu_reset_method);

 	return AMD_RESET_METHOD_LEGACY;
 }

+static int si_asic_reset(struct amdgpu_device *adev)
+{
+	int r;
+
+	switch (si_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		r = amdgpu_device_pci_reset(adev);
+		break;
+	default:
+		dev_info(adev->dev, "PCI CONFIG reset\n");
+		r = si_gpu_pci_config_reset(adev);
+		break;
+	}
+
+	return r;
+}
+
 static u32 si_get_config_memsize(struct amdgpu_device *adev)
 {
 	return RREG32(mmCONFIG_MEMSIZE);

drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c (new file, 77 lines)
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v11_0_6.h"
+#include "smuio/smuio_11_0_6_offset.h"
+#include "smuio/smuio_11_0_6_sh_mask.h"
+
+static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+}
+
+static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+}
+
+static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+	u32 def, data;
+
+	/* enable/disable ROM CG is not supported on APU */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+	else
+		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+	if (def != data)
+		WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+	u32 data;
+
+	/* CGTT_ROM_CLK_CTRL0 is not available for APU */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = {
+	.get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset,
+	.get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset,
+	.update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating,
+	.get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state,
+};

drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h (new file, 30 lines)
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V11_0_6_H__
+#define __SMUIO_V11_0_6_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs;
+
+#endif /* __SMUIO_V11_0_6_H__ */

@@ -233,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
 {
 	u32 reference_clock = adev->clock.spll.reference_freq;

+	if (adev->asic_type == CHIP_RENOIR)
+		return 10000;
 	if (adev->asic_type == CHIP_RAVEN)
 		return reference_clock / 4;

@@ -479,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)

 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
-	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
 		return amdgpu_reset_method;

 	if (amdgpu_reset_method != -1)

@@ -524,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 		return 0;

 	switch (soc15_asic_reset_method(adev)) {
-	case AMD_RESET_METHOD_BACO:
-		dev_info(adev->dev, "BACO reset\n");
-		return soc15_asic_baco_reset(adev);
-	case AMD_RESET_METHOD_MODE2:
-		dev_info(adev->dev, "MODE2 reset\n");
-		return amdgpu_dpm_mode2_reset(adev);
-	default:
-		dev_info(adev->dev, "MODE1 reset\n");
-		return soc15_asic_mode1_reset(adev);
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		return amdgpu_device_pci_reset(adev);
+	case AMD_RESET_METHOD_BACO:
+		dev_info(adev->dev, "BACO reset\n");
+		return soc15_asic_baco_reset(adev);
+	case AMD_RESET_METHOD_MODE2:
+		dev_info(adev->dev, "MODE2 reset\n");
+		return amdgpu_dpm_mode2_reset(adev);
+	default:
+		dev_info(adev->dev, "MODE1 reset\n");
+		return soc15_asic_mode1_reset(adev);
 	}
 }
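
The bare 10000 returned for Renoir above is easy to misread without the unit convention: the get_xclk callbacks report the reference clock in 10 kHz units (my reading of the surrounding amdgpu code, not something the hunk states), so the fix pins Renoir's reference clock at 100 MHz:

    #include <stdio.h>
    int main(void)
    {
        unsigned int xclk_10khz = 10000;          /* value returned for CHIP_RENOIR */
        printf("%u MHz\n", xclk_10khz / 100);     /* 10 kHz units -> 100 MHz */
        return 0;
    }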
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }

-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	u32 i;
 	int r = -EINVAL;

 	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

 	/* disable BM */
 	pci_clear_master(adev->pdev);

@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 			/* enable BM */
 			pci_set_master(adev->pdev);
 			adev->has_hw_reset = true;
-			return 0;
+			r = 0;
+			break;
 		}
 		udelay(1);
 	}
-	return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = vi_gpu_pci_config_reset(adev);

 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 static inline void dqm_lock(struct device_queue_manager *dqm)
 {
 	mutex_lock(&dqm->lock_hidden);
-	dqm->saved_flags = memalloc_nofs_save();
+	dqm->saved_flags = memalloc_noreclaim_save();
 }
 static inline void dqm_unlock(struct device_queue_manager *dqm)
 {
-	memalloc_nofs_restore(dqm->saved_flags);
+	memalloc_noreclaim_restore(dqm->saved_flags);
 	mutex_unlock(&dqm->lock_hidden);
 }
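
One sentence of rationale, reconstructed from the flag semantics rather than stated in the hunk: memalloc_nofs_save() only forbids filesystem reclaim, while memalloc_noreclaim_save() sets PF_MEMALLOC and suppresses direct reclaim entirely, so allocations made while holding dqm->lock_hidden can no longer recurse into MMU notifiers that want the same lock; this is the "recursive lock warnings" item in the merge summary.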
@@ -497,8 +497,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			      dev->node_props.num_sdma_queues_per_engine);
 	sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
 			      dev->node_props.num_cp_queues);
-	sysfs_show_64bit_prop(buffer, offs, "unique_id",
-			      dev->node_props.unique_id);

 	if (dev->gpu) {
 		log_max_watch_addr =

@@ -529,6 +527,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			      dev->node_props.capability);
 		sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
 			      dev->gpu->sdma_fw_version);
+		sysfs_show_64bit_prop(buffer, offs, "unique_id",
+				      amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+
 	}

 	return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",

@@ -1340,7 +1341,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
 	dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
-	dev->node_props.unique_id = amdgpu_amdkfd_get_unique_id(dev->gpu->kgd);

 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);

@@ -57,7 +57,6 @@

 struct kfd_node_properties {
 	uint64_t hive_id;
-	uint64_t unique_id;
 	uint32_t cpu_cores_count;
 	uint32_t simd_count;
 	uint32_t mem_banks_count;

@@ -1128,7 +1128,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)

 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.hdcp_workqueue) {
-		hdcp_destroy(adev->dm.hdcp_workqueue);
+		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
 		adev->dm.hdcp_workqueue = NULL;
 	}

@@ -5374,9 +5374,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct amdgpu_display_manager *dm = &adev->dm;
-#endif
 	int rc = 0;

 	if (enable) {

@@ -5396,7 +5394,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
 		return -EBUSY;

-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (amdgpu_in_reset(adev))
 		return 0;

@@ -5416,7 +5413,6 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)

 	mutex_unlock(&dm->dc_lock);

-#endif
 	return 0;
 }

@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
 }


-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
 {
 	int i = 0;

@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
 	}

+	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
 	kfree(hdcp_work->srm);
 	kfree(hdcp_work->srm_temp);
 	kfree(hdcp_work);

@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,

 void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
-void hdcp_destroy(struct hdcp_workqueue *work);
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);

 struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);

@@ -526,11 +526,11 @@ bool dm_helpers_submit_i2c(
 bool dm_helpers_dp_write_dsc_enable(
 		struct dc_context *ctx,
 		const struct dc_stream_state *stream,
-		bool enable
-)
+		bool enable)
 {
 	uint8_t enable_dsc = enable ? 1 : 0;
 	struct amdgpu_dm_connector *aconnector;
+	uint8_t ret;

 	if (!stream)
 		return false;

@@ -541,13 +541,13 @@ bool dm_helpers_dp_write_dsc_enable(
 		if (!aconnector->dsc_aux)
 			return false;

-		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
 	}

 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
 		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);

-	return false;
+	return (ret > 0);
 }

 bool dm_helpers_is_dp_sink_present(struct dc_link *link)

@@ -662,6 +662,20 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
 		__func__);
 }

+static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
+			struct amdgpu_irq_src *source,
+			unsigned int crtc_id,
+			enum amdgpu_interrupt_state state)
+{
+	return dm_irq_state(
+		adev,
+		source,
+		crtc_id,
+		state,
+		IRQ_TYPE_VLINE0,
+		__func__);
+}
+
 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
 					   struct amdgpu_irq_src *source,
 					   unsigned int crtc_id,

@@ -681,6 +695,11 @@ static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
 	.process = amdgpu_dm_irq_handler,
 };

+static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
+	.set = amdgpu_dm_set_vline0_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
 static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
 	.set = amdgpu_dm_set_vupdate_irq_state,
 	.process = amdgpu_dm_irq_handler,

@@ -702,6 +721,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

+	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
+	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;
+
 	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
 	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

@@ -205,27 +205,9 @@ static bool create_links(
 		link = link_create(&link_init_params);

 		if (link) {
-			bool should_destory_link = false;
-
-			if (link->connector_signal == SIGNAL_TYPE_EDP) {
-				if (dc->config.edp_not_connected) {
-					if (!IS_DIAG_DC(dc->ctx->dce_environment))
-						should_destory_link = true;
-				} else {
-					enum dc_connection_type type;
-					dc_link_detect_sink(link, &type);
-					if (type == dc_connection_none)
-						should_destory_link = true;
-				}
-			}
-
-			if (dc->config.force_enum_edp || !should_destory_link) {
-				dc->links[dc->link_count] = link;
-				link->dc = dc;
-				++dc->link_count;
-			} else {
-				link_destroy(&link);
-			}
+			dc->links[dc->link_count] = link;
+			link->dc = dc;
+			++dc->link_count;
 		}
 	}

@@ -1016,8 +998,30 @@ destruct_dc:
 	return NULL;
 }

+static void detect_edp_presence(struct dc *dc)
+{
+	struct dc_link *edp_link = get_edp_link(dc);
+	bool edp_sink_present = true;
+
+	if (!edp_link)
+		return;
+
+	if (dc->config.edp_not_connected) {
+		edp_sink_present = false;
+	} else {
+		enum dc_connection_type type;
+		dc_link_detect_sink(edp_link, &type);
+		if (type == dc_connection_none)
+			edp_sink_present = false;
+	}
+
+	edp_link->edp_sink_present = edp_sink_present;
+}
+
 void dc_hardware_init(struct dc *dc)
 {
+	detect_edp_presence(dc);
 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
 		dc->hwss.init_hw(dc);
 }

@ -3688,8 +3688,8 @@ uint32_t dc_link_bandwidth_kbps(
|
||||
* but the difference is minimal and is in a safe direction,
|
||||
* which all works well around potential ambiguity of DP 1.4a spec.
|
||||
*/
|
||||
link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
|
||||
link_bw_kbps, 32);
|
||||
long long fec_link_bw_kbps = link_bw_kbps * 970LL;
|
||||
link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
|
||||
}
|
||||
|
||||
return link_bw_kbps;
|
||||
|
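[Editor's note: the dc_link_bandwidth_kbps hunk above trades a 32.32 fixed-point multiply (mul_u64_u32_shr) for a widen-then-divide when derating an FEC link to about 97% of nominal bandwidth. A minimal user-space sketch of both computations, modeling the kernel helpers with plain 64-bit arithmetic; the function names and the sample value are illustrative, not taken from the driver:]

#include <stdint.h>
#include <stdio.h>

/* Model of the old path: scale by 970/1000 via a 32.32 fixed-point factor. */
static uint32_t fec_derate_fixed_point(uint32_t link_bw_kbps)
{
	uint64_t factor = ((1ULL << 32) * 970ULL) / 1000ULL; /* ~0.97 in 32.32 */
	return (uint32_t)((factor * link_bw_kbps) >> 32);
}

/* Model of the new path: widen to 64 bits first, then divide once. */
static uint32_t fec_derate_div64(uint32_t link_bw_kbps)
{
	long long fec_link_bw_kbps = (long long)link_bw_kbps * 970LL;
	return (uint32_t)(fec_link_bw_kbps / 1000LL);
}

int main(void)
{
	uint32_t bw = 32400000; /* e.g. HBR3 x4: 32.4 Gbps expressed in kbps */
	printf("fixed-point: %u kbps\n", fec_derate_fixed_point(bw));
	printf("div64:       %u kbps\n", fec_derate_div64(bw));
	return 0;
}

[The two forms can differ by a unit or so in the last place from the double truncation in the fixed-point path; the division form is easier to audit.]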
@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.2.121"
#define DC_VER "3.2.122"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
uint32_t preferred_bpp_x16;
bool enable_dsc_when_not_needed;
};

@ -63,8 +62,8 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
bool dc_dsc_compute_bandwidth_range(
const struct display_stream_compressor *dsc,
uint32_t dsc_min_slice_height_override,
uint32_t min_bpp_x16,
uint32_t max_bpp_x16,
uint32_t min_bpp,
uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range);
@ -79,7 +78,7 @@ bool dc_dsc_compute_config(
struct dc_dsc_config *dsc_cfg);

void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
uint32_t max_target_bpp_limit_override,
struct dc_dsc_policy *policy);

void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);

@ -769,7 +769,6 @@ struct dc_crtc_timing {
#endif

struct dc_crtc_timing_flags flags;
uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */
struct dc_dsc_config dsc_cfg;
};

@ -103,6 +103,8 @@ struct dc_link {
bool lttpr_non_transparent_mode;
bool is_internal_display;

bool edp_sink_present;

/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
*/

@ -240,7 +240,7 @@ static bool calc_fb_divider_checking_tolerance(
pll_settings->calculated_pix_clk_100hz =
actual_calculated_clock_100hz;
pll_settings->vco_freq =
actual_calculated_clock_100hz * post_divider / 10;
div_u64((u64)actual_calculated_clock_100hz * post_divider, 10);
return true;
}
return false;

@ -57,6 +57,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
union dmub_rb_cmd cmd;
uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;

memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
@ -135,6 +136,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;

memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_level.header.type = DMUB_CMD__ABM;
cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
cmd.abm_set_level.abm_set_level_data.level = level;
@ -160,6 +162,7 @@ static bool dmub_abm_init_config(struct abm *abm,
// Copy iramtable into cw7
memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);

memset(&cmd, 0, sizeof(cmd));
// Fw will copy from cw7 to fw_state
cmd.abm_init_config.header.type = DMUB_CMD__ABM;
cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;

@ -33,8 +33,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_hw_lock_flags *hw_locks,
struct dmub_hw_lock_inst_flags *inst_flags)
{
union dmub_rb_cmd cmd = { 0 };
union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
cmd.lock_hw.header.sub_type = 0;
cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
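[Editor's note: several hunks above and below replace `union dmub_rb_cmd cmd = { 0 };` (or no initializer at all) with an explicit memset before the command is filled in. For a union, a `{ 0 }` initializer is only guaranteed to zero-initialize the first named member, which can be smaller than the largest command payload, so stale stack bytes could reach the firmware. A stand-alone sketch of the pitfall with a hypothetical two-member union:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for union dmub_rb_cmd: members of different sizes. */
union demo_cmd {
	struct { uint8_t type; } header;      /* first (small) member */
	struct { uint8_t raw[64]; } payload;  /* largest member */
};

int main(void)
{
	union demo_cmd a = { 0 };  /* only the first member is guaranteed zero */
	union demo_cmd b;
	memset(&b, 0, sizeof(b));  /* zeroes the whole 64-byte object */

	/* With '= { 0 }', bytes past the first member may hold unspecified
	 * values; after memset they are all guaranteed to be 0. */
	printf("sizeof union: %zu bytes\n", sizeof(union demo_cmd));
	printf("b.payload.raw[63] = %u (guaranteed 0)\n", b.payload.raw[63]);
	(void)a;
	return 0;
}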
@ -101,6 +101,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
return false;

memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
switch (stream->link->psr_settings.psr_version) {
@ -131,7 +132,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
uint32_t retry_count;
enum dc_psr_state state = PSR_STATE0;


memset(&cmd, 0, sizeof(cmd));
cmd.psr_enable.header.type = DMUB_CMD__PSR;

if (enable)
@ -184,6 +185,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
if (state == PSR_STATE0)
return;

memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_level.header.type = DMUB_CMD__PSR;
cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
@ -233,6 +235,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);

memset(&cmd, 0, sizeof(cmd));
cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
@ -285,6 +288,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;

memset(&cmd, 0, sizeof(cmd));
cmd.psr_force_static.header.type = DMUB_CMD__PSR;
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;

@ -47,7 +47,7 @@

unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
unsigned int ret_vsnprintf;
int ret_vsnprintf;
unsigned int chars_printed;

va_list args;

@ -956,6 +956,21 @@ void dcn10_link_encoder_enable_tmds_output(
}
}

void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
uint32_t pixel_clock)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

dcn10_link_encoder_enable_tmds_output(
enc, clock_source, color_depth, signal, pixel_clock);

REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
}

/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,

@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
SRI(DIG_CLOCK_PATTERN, DIG, id), \
SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
@ -83,6 +84,7 @@ struct dcn10_link_enc_hpd_registers {
struct dcn10_link_enc_registers {
uint32_t DIG_BE_CNTL;
uint32_t DIG_BE_EN_CNTL;
uint32_t DIG_CLOCK_PATTERN;
uint32_t DP_CONFIG;
uint32_t DP_DPHY_CNTL;
uint32_t DP_DPHY_INTERNAL_CTRL;
@ -168,6 +170,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
@ -218,6 +221,7 @@ struct dcn10_link_enc_registers {
type DIG_HPD_SELECT;\
type DIG_MODE;\
type DIG_FE_SOURCE_SELECT;\
type DIG_CLOCK_PATTERN;\
type DPHY_BYPASS;\
type DPHY_ATEST_SEL_LANE0;\
type DPHY_ATEST_SEL_LANE1;\
@ -536,6 +540,13 @@ void dcn10_link_encoder_enable_tmds_output(
enum signal_type signal,
uint32_t pixel_clock);

void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
uint32_t pixel_clock);

/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,

@ -363,7 +363,7 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,

@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 11.6,
.sr_enter_plus_exit_time_us = 13.9,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@ -2097,6 +2097,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
@ -2150,6 +2151,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
default:
/* In case there is no signal, set dp with 4 lanes to allow max config */
pipes[pipe_cnt].dout.is_virtual = 1;
pipes[pipe_cnt].dout.output_type = dm_dp;
pipes[pipe_cnt].dout.dp_lanes = 4;
}
@ -3245,7 +3247,7 @@ restore_dml_state:
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported = false;
bool voltage_supported;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
@ -3506,7 +3508,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;

// FCLK:UCLK ratio is 1.08
min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080,
1000000);

calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;

@ -123,7 +123,7 @@ void dcn21_optimize_pwr_state(
* PHY will hang on the next mode set attempt.
* if enable PLL follow by disable PLL (without executing lane enable/disable),
* RDPCS_PHY_DP_MPLLB_STATE remains 1,
* which indicate that PLL disable attempt actually didn’t go through.
* which indicate that PLL disable attempt actually didn't go through.
* As a workaround, insert PHY lane enable/disable before PLL disable.
*/
void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
@ -143,6 +143,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
struct dc_context *dc = abm->ctx;
uint32_t ramping_boundary = 0xFFFF;

memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
@ -212,6 +213,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
if (abm && panel_cntl)
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);

memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;

@ -1329,8 +1329,8 @@ validate_out:
return out;
}

bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context, bool fast_validate)
{
bool out = false;

@ -1383,6 +1383,22 @@ validate_out:

return out;
}

/*
* Some of the functions further below use the FPU, so we need to wrap this
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
DC_FP_START();
voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}

static void dcn21_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
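[Editor's note: the dcn20/dcn21 hunks above confine all FPU-touching bandwidth math to a noinline *_fp() helper bracketed by DC_FP_START()/DC_FP_END(), which map to kernel_fpu_begin()/kernel_fpu_end() on x86. A user-space model of the pattern; the macros are stubbed as no-ops here and the float math is a placeholder:]

#include <stdbool.h>
#include <stdio.h>

/* User-space stubs: in the kernel these expand to kernel_fpu_begin()
 * and kernel_fpu_end() on x86. */
#define DC_FP_START() do { } while (0)
#define DC_FP_END()   do { } while (0)

/* FPU-touching math is confined to a separate helper so the compiler
 * cannot hoist float operations outside the protected region. */
static bool validate_bandwidth_fp(int clk_khz)
{
	float fclk = (float)clk_khz * 1.08f; /* placeholder float math */
	return fclk < 4.0e9f;
}

static bool validate_bandwidth(int clk_khz)
{
	bool supported;

	DC_FP_START();
	supported = validate_bandwidth_fp(clk_khz);
	DC_FP_END();
	return supported;
}

int main(void)
{
	printf("supported: %d\n", (int)validate_bandwidth(1000000));
	return 0;
}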
@ -4168,10 +4168,11 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}

@ -4289,10 +4289,11 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}

@ -4257,10 +4257,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
if (!mode_lib->vba.skip_dio_check[k]
&& (locals->OutputBppPerState[i][k] == BPP_INVALID
|| (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}

@ -4263,7 +4263,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
&& (v->OutputBppPerState[i][k] == 0
|| (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
v->DIOSupport[i] = false;

@ -297,6 +297,7 @@ struct _vcs_dpi_display_output_params_st {
int num_active_wb;
int output_bpc;
int output_type;
int is_virtual;
int output_format;
int dsc_slices;
int max_audio_sample_rate;

@ -451,6 +451,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] =
dout->is_virtual;

if (!dout->dsc_enable)
mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;

@ -340,6 +340,7 @@ struct vba_vars_st {
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
enum output_encoder_class Output[DC__NUM_DPP__MAX];
bool skip_dio_check[DC__NUM_DPP__MAX];
unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
bool SynchronizedVBlank;
unsigned int NumberOfCursors[DC__NUM_DPP__MAX];

@ -369,11 +369,6 @@ static bool decide_dsc_target_bpp_x16(
/* enough bandwidth without dsc */
*target_bpp_x16 = 0;
should_use_dsc = false;
} else if (policy->preferred_bpp_x16 > 0 &&
policy->preferred_bpp_x16 <= range.max_target_bpp_x16 &&
policy->preferred_bpp_x16 >= range.min_target_bpp_x16) {
*target_bpp_x16 = policy->preferred_bpp_x16;
should_use_dsc = true;
} else if (target_bandwidth_kbps >= range.max_kbps) {
/* use max target bpp allowed */
*target_bpp_x16 = range.max_target_bpp_x16;
@ -550,7 +545,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
int min_slice_height_override,
int max_dsc_target_bpp_limit_override_x16,
int max_dsc_target_bpp_limit_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@ -569,7 +564,7 @@ static bool setup_dsc_config(

memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));

dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override_x16, &policy);
dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;

@ -870,8 +865,8 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
bool dc_dsc_compute_bandwidth_range(
const struct display_stream_compressor *dsc,
uint32_t dsc_min_slice_height_override,
uint32_t min_bpp_x16,
uint32_t max_bpp_x16,
uint32_t min_bpp,
uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range)
@ -888,10 +883,10 @@ bool dc_dsc_compute_bandwidth_range(

if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
dsc_min_slice_height_override, max_bpp_x16, &config);
dsc_min_slice_height_override, max_bpp, &config);

if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, &dsc_common_caps, timing, range);
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);

return is_dsc_possible;
}
@ -900,7 +895,7 @@ bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
uint32_t dsc_min_slice_height_override,
uint32_t max_target_bpp_limit_override_x16,
uint32_t max_target_bpp_limit_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@ -913,11 +908,11 @@ bool dc_dsc_compute_config(
&dsc_enc_caps,
target_bandwidth_kbps,
timing, dsc_min_slice_height_override,
max_target_bpp_limit_override_x16, dsc_cfg);
max_target_bpp_limit_override, dsc_cfg);
return is_dsc_possible;
}

void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy)
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override, struct dc_dsc_policy *policy)
{
uint32_t bpc = 0;

@ -972,15 +967,13 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t
return;
}

policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;

/* internal upper limit, default 16 bpp */
if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
policy->max_target_bpp = dsc_policy_max_target_bpp_limit;

/* apply override */
if (max_target_bpp_limit_override_x16 && policy->max_target_bpp > max_target_bpp_limit_override_x16 / 16)
policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16;
if (max_target_bpp_limit_override && policy->max_target_bpp > max_target_bpp_limit_override)
policy->max_target_bpp = max_target_bpp_limit_override;

/* enable DSC when not needed, default false */
if (dsc_policy_enable_dsc_when_not_needed)

@ -309,9 +309,9 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
{
if (cur_support != calc_support) {
if (calc_support == true && safe_to_lower)
if (calc_support && safe_to_lower)
return true;
else if (calc_support == false && !safe_to_lower)
else if (!calc_support && !safe_to_lower)
return true;
}

@ -58,6 +58,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC2_VLINE0;
case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC3_VLINE0;
case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC4_VLINE0;
case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC5_VLINE0;
case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
@ -167,6 +179,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};

static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
};

static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
@ -241,6 +258,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.funcs = &vblank_irq_info_funcs\
}

#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}

#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
@ -349,6 +374,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
};

static const struct irq_service_funcs irq_service_funcs_dcn10 = {
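[Editor's note: vline0_int_entry() above works because irq_source_info_dcn10[] uses designated array initializers, so each entry lands at its DC_IRQ_SOURCE_* index regardless of listing order. A compact stand-alone model of that pattern; the enum values and the entry struct are invented for illustration:]

#include <stdio.h>

/* Invented stand-ins for DC_IRQ_SOURCE_* and struct irq_source_info. */
enum irq_source { IRQ_VBLANK1 = 1, IRQ_VLINE0_1 = 7, IRQ_SOURCES_NUM = 13 };

struct irq_entry { const char *name; };

#define vline0_int_entry(n) \
	[IRQ_VLINE0_1 + (n)] = { .name = "vline0" }

static const struct irq_entry table[IRQ_SOURCES_NUM] = {
	[IRQ_VBLANK1] = { .name = "vblank" },
	vline0_int_entry(0),
	vline0_int_entry(1),
	vline0_int_entry(2),
};

int main(void)
{
	for (int i = 0; i < IRQ_SOURCES_NUM; i++)
		if (table[i].name)
			printf("%2d: %s\n", i, table[i].name);
	return 0;
}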
@ -160,6 +160,7 @@ enum irq_type
IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
};

#define DAL_VALID_IRQ_SRC_NUM(src) \

@ -47,10 +47,10 @@

/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
#define DMUB_FW_VERSION_GIT_HASH 0xca1cd48c9
#define DMUB_FW_VERSION_GIT_HASH 0x6444c02e7
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
#define DMUB_FW_VERSION_REVISION 50
#define DMUB_FW_VERSION_REVISION 51
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@ -491,13 +491,34 @@ struct dmub_rb_cmd_enable_disp_power_gating {
struct dmub_cmd_enable_disp_power_gating_data power_gating;
};

struct dmub_cmd_dig1_transmitter_control_data {
struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t phyid; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
uint8_t action; /**< Defined as ATOM_TRANSMITER_ACTION_xxx */
union {
uint8_t digmode; /**< enum atom_encode_mode_def */
uint8_t dplaneset; /**< DP voltage swing and pre-emphasis value, "DP_LANE_SET__xDB_y_zV" */
} mode_laneset;
uint8_t lanenum; /**< Number of lanes */
union {
uint32_t symclk_10khz; /**< Symbol Clock in 10Khz */
} symclk_units;
uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
uint8_t reserved0; /**< For future use */
uint8_t reserved1; /**< For future use */
uint8_t reserved2[3]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
};

union dmub_cmd_dig1_transmitter_control_data {
struct dig_transmitter_control_parameters_v1_6 dig;
struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7;
};

struct dmub_rb_cmd_dig1_transmitter_control {
struct dmub_cmd_header header;
struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
union dmub_cmd_dig1_transmitter_control_data transmitter_control;
};

struct dmub_rb_cmd_dpphy_init {

@ -548,6 +548,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);

@ -213,6 +213,7 @@ enum PP_FEATURE_MASK {
PP_ACG_MASK = 0x10000,
PP_STUTTER_MODE = 0x20000,
PP_AVFS_MASK = 0x40000,
PP_GFX_DCS_MASK = 0x80000,
};

enum DC_FEATURE_MASK {

@ -0,0 +1,35 @@
/*
* Copyright (C) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _smuio_11_0_6_OFFSET_HEADER
#define _smuio_11_0_6_OFFSET_HEADER


// addressBlock: smuio_smuio_SmuSmuioDec
// base address: 0x5a000
#define mmCGTT_ROM_CLK_CTRL0 0x00e4
#define mmCGTT_ROM_CLK_CTRL0_BASE_IDX 0
#define mmROM_INDEX 0x00e5
#define mmROM_INDEX_BASE_IDX 0
#define mmROM_DATA 0x00e6
#define mmROM_DATA_BASE_IDX 0

#endif

@ -0,0 +1,41 @@
/*
* Copyright (C) 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _smuio_11_0_6_SH_MASK_HEADER
#define _smuio_11_0_6_SH_MASK_HEADER


//CGTT_ROM_CLK_CTRL0
#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
//ROM_INDEX
#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
//ROM_DATA
#define ROM_DATA__ROM_DATA__SHIFT 0x0
#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL

#endif
|
||||
char *buf)
|
||||
{
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
uint32_t limit = 0;
|
||||
int limit_type = to_sensor_dev_attr(attr)->index;
|
||||
uint32_t limit = limit_type << 24;
|
||||
ssize_t size;
|
||||
int r;
|
||||
|
||||
@ -3073,7 +3074,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
|
||||
}
|
||||
|
||||
if (is_support_sw_smu(adev)) {
|
||||
smu_get_power_limit(&adev->smu, &limit, true);
|
||||
smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
|
||||
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
|
||||
@ -3093,7 +3094,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
|
||||
char *buf)
|
||||
{
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
uint32_t limit = 0;
|
||||
int limit_type = to_sensor_dev_attr(attr)->index;
|
||||
uint32_t limit = limit_type << 24;
|
||||
ssize_t size;
|
||||
int r;
|
||||
|
||||
@ -3107,7 +3109,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
|
||||
}
|
||||
|
||||
if (is_support_sw_smu(adev)) {
|
||||
smu_get_power_limit(&adev->smu, &limit, false);
|
||||
smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
|
||||
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
|
||||
@ -3122,6 +3124,15 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
int limit_type = to_sensor_dev_attr(attr)->index;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n",
|
||||
limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
@ -3129,6 +3140,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
|
||||
size_t count)
|
||||
{
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
int limit_type = to_sensor_dev_attr(attr)->index;
|
||||
int err;
|
||||
u32 value;
|
||||
|
||||
@ -3143,7 +3155,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
|
||||
return err;
|
||||
|
||||
value = value / 1000000; /* convert to Watt */
|
||||
|
||||
value |= limit_type << 24;
|
||||
|
||||
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
if (err < 0) {
|
||||
@ -3355,6 +3367,12 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
|
||||
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
|
||||
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
|
||||
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
|
||||
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
|
||||
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
|
||||
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
|
||||
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
|
||||
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
|
||||
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
|
||||
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
|
||||
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
|
||||
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
|
||||
@ -3393,6 +3411,12 @@ static struct attribute *hwmon_attributes[] = {
|
||||
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
|
||||
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
|
||||
&sensor_dev_attr_power1_cap.dev_attr.attr,
|
||||
&sensor_dev_attr_power1_label.dev_attr.attr,
|
||||
&sensor_dev_attr_power2_average.dev_attr.attr,
|
||||
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
|
||||
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
|
||||
&sensor_dev_attr_power2_cap.dev_attr.attr,
|
||||
&sensor_dev_attr_power2_label.dev_attr.attr,
|
||||
&sensor_dev_attr_freq1_input.dev_attr.attr,
|
||||
&sensor_dev_attr_freq1_label.dev_attr.attr,
|
||||
&sensor_dev_attr_freq2_input.dev_attr.attr,
|
||||
@ -3485,8 +3509,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
effective_mode &= ~S_IWUSR;
|
||||
}
|
||||
|
||||
if (((adev->flags & AMD_IS_APU) ||
|
||||
adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */
|
||||
if (((adev->family == AMDGPU_FAMILY_SI) ||
|
||||
((adev->flags & AMD_IS_APU) &&
|
||||
(adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
|
||||
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
|
||||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
|
||||
@ -3549,6 +3574,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
/* only Vangogh has fast PPT limit and power labels */
|
||||
if (!(adev->asic_type == CHIP_VANGOGH) &&
|
||||
(attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_power1_label.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
return effective_mode;
|
||||
}
|
||||
|
||||
|
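[Editor's note: with the hwmon hunks above, Vangogh gains a second power channel, so user space sees power1_* (slowPPT) and power2_* (fastPPT) files with matching *_label entries. A small user-space reader as a sketch; the hwmon0 path is an assumption, since the instance number varies per system:]

#include <stdio.h>

/* Hypothetical hwmon instance; on a real system, resolve the amdgpu
 * entry under /sys/class/hwmon first. Cap values are in microwatts. */
#define HWMON "/sys/class/hwmon/hwmon0/"

static void show(const char *label_file, const char *cap_file)
{
	char label[32];
	long cap_uw;
	FILE *f;

	f = fopen(label_file, "r");
	if (f && fscanf(f, "%31s", label) == 1)
		printf("%s: ", label);
	if (f)
		fclose(f);

	f = fopen(cap_file, "r");
	if (f && fscanf(f, "%ld", &cap_uw) == 1)
		printf("%ld W\n", cap_uw / 1000000);
	if (f)
		fclose(f);
}

int main(void)
{
	show(HWMON "power1_label", HWMON "power1_cap"); /* slowPPT */
	show(HWMON "power2_label", HWMON "power2_cap"); /* fastPPT */
	return 0;
}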
@ -161,6 +161,19 @@ enum smu_power_src_type
SMU_POWER_SOURCE_COUNT,
};

enum smu_ppt_limit_type
{
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
};

enum smu_ppt_limit_level
{
SMU_PPT_LIMIT_MIN = -1,
SMU_PPT_LIMIT_CURRENT,
SMU_PPT_LIMIT_MAX,
};

enum smu_memory_pool_size
{
SMU_MEMORY_POOL_SIZE_ZERO = 0,
@ -701,6 +714,12 @@ struct pptable_funcs {
*/
int (*get_power_limit)(struct smu_context *smu);

/**
* @get_ppt_limit: Get the device's ppt limits.
*/
int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);

/**
* @set_df_cstate: Set data fabric cstate.
*/
@ -1218,7 +1237,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);

int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
bool max_setting);
enum smu_ppt_limit_level limit_level);

int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);

@ -210,6 +210,10 @@
__SMU_DUMMY_MAP(DisallowGpo), \
__SMU_DUMMY_MAP(Enable2ndUSB20Port), \
__SMU_DUMMY_MAP(RequestActiveWgp), \
__SMU_DUMMY_MAP(SetFastPPTLimit), \
__SMU_DUMMY_MAP(SetSlowPPTLimit), \
__SMU_DUMMY_MAP(GetFastPPTLimit), \
__SMU_DUMMY_MAP(GetSlowPPTLimit), \

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type

@ -129,6 +129,15 @@ struct smu_11_0_power_context {
enum smu_11_0_power_state power_state;
};

struct smu_11_5_power_context {
uint32_t power_source;
uint8_t in_power_limit_boost_mode;
enum smu_11_0_power_state power_state;

uint32_t current_fast_ppt_limit;
uint32_t max_fast_ppt_limit;
};

enum smu_v11_0_baco_seq {
BACO_SEQ_BACO = 0,
BACO_SEQ_MSR,
@ -272,10 +281,6 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);

int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);

void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);

void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);

int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);

@ -104,7 +104,11 @@
#define PPSMC_MSG_DramLogSetDramBufferSize 0x46
#define PPSMC_MSG_RequestActiveWgp 0x47
#define PPSMC_MSG_QueryActiveWgp 0x48
#define PPSMC_Message_Count 0x49
#define PPSMC_MSG_SetFastPPTLimit 0x49
#define PPSMC_MSG_SetSlowPPTLimit 0x4A
#define PPSMC_MSG_GetFastPPTLimit 0x4B
#define PPSMC_MSG_GetSlowPPTLimit 0x4C
#define PPSMC_Message_Count 0x4D

//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {

@ -60,7 +60,5 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_

int smu_v12_0_set_driver_table_location(struct smu_context *smu);

void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);

#endif
#endif

@ -1487,7 +1487,7 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
}

if (!smu10_data->fine_grain_enabled) {
pr_err("Fine grain not started\n");
pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}

@ -2044,22 +2044,9 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)

int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
bool max_setting)
{
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;

mutex_lock(&smu->mutex);

*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

mutex_unlock(&smu->mutex);

return 0;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
enum smu_ppt_limit_level limit_level)
{
uint32_t limit_type = *limit >> 24;
int ret = 0;

if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@ -2067,6 +2054,43 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)

mutex_lock(&smu->mutex);

if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
*limit = smu->current_power_limit;
break;
case SMU_PPT_LIMIT_MAX:
*limit = smu->max_power_limit;
break;
default:
break;
}
}

mutex_unlock(&smu->mutex);

return ret;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
uint32_t limit_type = limit >> 24;
int ret = 0;

if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;

mutex_lock(&smu->mutex);

if (limit_type != SMU_DEFAULT_PPT_LIMIT)
if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit);
goto out;
}

if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
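[Editor's note: after this rework, the SMU power-limit API carries the PPT limit type in the top byte of the 32-bit value, with the wattage in the low bits; see the `limit >> 24` and `limit_type << 24` lines above. A small sketch of that packing; masking the low 24 bits explicitly is an illustrative choice, since real wattages never approach that range:]

#include <stdint.h>
#include <stdio.h>

/* Mirrors enum smu_ppt_limit_type from the hunks above. */
enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT };

/* Pack a limit type and a wattage into one u32, as the hwmon code does
 * with "value |= limit_type << 24". */
static uint32_t pack_limit(enum smu_ppt_limit_type type, uint32_t watts)
{
	return ((uint32_t)type << 24) | (watts & 0xFFFFFF);
}

int main(void)
{
	uint32_t packed = pack_limit(SMU_FAST_PPT_LIMIT, 30);

	printf("limit_type = %u\n", packed >> 24);      /* 1: fast PPT */
	printf("watts      = %u\n", packed & 0xFFFFFF); /* 30 */
	return 0;
}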
@ -2239,7 +2239,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;

smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);

gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@ -2276,6 +2276,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
arcturus_get_current_pcie_link_speed(smu);

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

*table = (void *)gpu_metrics;

return sizeof(struct gpu_metrics_v1_0);

@ -2314,7 +2314,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,

mutex_unlock(&smu->metrics_lock);

smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);

gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@ -2354,6 +2354,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

*table = (void *)gpu_metrics;

return sizeof(struct gpu_metrics_v1_0);

@ -261,6 +261,11 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_GPO_BIT);
}

if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
(adev->asic_type > CHIP_SIENNA_CICHLID) &&
!(adev->flags & AMD_IS_APU))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);

if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@ -294,6 +299,12 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_PG_BIT);

if (smu->dc_controlled_by_gpio)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);

if (amdgpu_aspm == 1)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);

return 0;
}

@ -2950,7 +2961,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;

smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);

gpu_metrics->temperature_edge = metrics->TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
@ -2993,6 +3004,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

*table = (void *)gpu_metrics;

return sizeof(struct gpu_metrics_v1_0);

@ -474,12 +474,14 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
sizeof(struct smu_11_5_power_context) :
sizeof(struct smu_11_0_power_context);

smu_power->power_context = kzalloc(sizeof(struct smu_11_0_power_context),
GFP_KERNEL);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_11_0_power_context);
smu_power->power_context_size = size;

return 0;
}
@ -2021,30 +2023,6 @@ int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
return link_speed[speed_level];
}

void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
{
memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));

gpu_metrics->common_header.structure_size =
sizeof(struct gpu_metrics_v1_0);
gpu_metrics->common_header.format_revision = 1;
gpu_metrics->common_header.content_revision = 0;

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}

void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
{
memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));

gpu_metrics->common_header.structure_size =
sizeof(struct gpu_metrics_v2_0);
gpu_metrics->common_header.format_revision = 2;
gpu_metrics->common_header.content_revision = 0;

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}

int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement)
{

@ -122,6 +122,10 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0),
MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
};

static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@ -1406,7 +1410,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;

smu_v11_0_init_gpu_metrics_v2_0(gpu_metrics);
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);

gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@ -1442,6 +1446,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,

gpu_metrics->throttle_status = metrics.ThrottlerStatus;

gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

*table = (void *)gpu_metrics;

return sizeof(struct gpu_metrics_v2_0);
@ -1455,7 +1461,8 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
dev_warn(smu->adev->dev,
"pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}

@ -1771,6 +1778,112 @@ static int vangogh_mode2_reset(struct smu_context *smu)
return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}

static int vangogh_get_power_limit(struct smu_context *smu)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;
uint32_t ppt_limit;
int ret = 0;

if (smu->adev->pm.fw_version < 0x43f1e00)
return ret;

ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
if (ret) {
dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
return ret;
}
/* convert from milliwatt to watt */
smu->current_power_limit = ppt_limit / 1000;
smu->max_power_limit = 29;

ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
if (ret) {
dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
return ret;
}
/* convert from milliwatt to watt */
power_context->current_fast_ppt_limit = ppt_limit / 1000;
power_context->max_fast_ppt_limit = 30;

return ret;
}

static int vangogh_get_ppt_limit(struct smu_context *smu,
uint32_t *ppt_limit,
enum smu_ppt_limit_type type,
enum smu_ppt_limit_level level)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;

if (!power_context)
return -EOPNOTSUPP;

if (type == SMU_FAST_PPT_LIMIT) {
switch (level) {
case SMU_PPT_LIMIT_MAX:
*ppt_limit = power_context->max_fast_ppt_limit;
break;
case SMU_PPT_LIMIT_CURRENT:
*ppt_limit = power_context->current_fast_ppt_limit;
break;
default:
break;
}
}

return 0;
}

static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;
uint32_t limit_type = ppt_limit >> 24;
int ret = 0;

if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}

switch (limit_type) {
case SMU_DEFAULT_PPT_LIMIT:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSlowPPTLimit,
ppt_limit * 1000, /* convert from watt to milliwatt */
NULL);
if (ret)
return ret;

smu->current_power_limit = ppt_limit;
break;
case SMU_FAST_PPT_LIMIT:
ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
if (ppt_limit > power_context->max_fast_ppt_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
ppt_limit, power_context->max_fast_ppt_limit);
return ret;
}

ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetFastPPTLimit,
ppt_limit * 1000, /* convert from watt to milliwatt */
NULL);
if (ret)
return ret;

power_context->current_fast_ppt_limit = ppt_limit;
break;
default:
return -EINVAL;
}

return ret;
}

static const struct pptable_funcs vangogh_ppt_funcs = {

.check_fw_status = smu_v11_0_check_fw_status,
@ -1807,6 +1920,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.post_init = vangogh_post_smu_init,
.mode2_reset = vangogh_mode2_reset,
.gfx_off_control = smu_v11_0_gfx_off_control,
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
};

void vangogh_set_ppt_funcs(struct smu_context *smu)
@ -350,7 +350,8 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
		dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
		dev_warn(smu->adev->dev,
			"pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
		return -EINVAL;
	}

@ -1129,7 +1130,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
		*value = metrics->AverageUvdActivity / 100;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->CurrentSocketPower << 8;
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = (metrics->GfxTemperature / 100) *
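
The corrected METRICS_AVERAGE_SOCKETPOWER case now divides by 1000, converting what appears to be a milliwatt reading from the SMU into the 24.8 fixed-point watt value the sensor interface consumes; the old line shifted without that conversion and over-reported power by a factor of 1000. Worked through in plain C:

	/* Illustrative arithmetic mirroring the fixed line above:
	 * 15500 mW -> (15500 << 8) / 1000 = 3968, i.e. 15.5 W in 24.8
	 * fixed point (3968 / 256 == 15.5).
	 */
	uint32_t socket_power_mw = 15500;
	uint32_t watts_q24_8 = (socket_power_mw << 8) / 1000;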
@ -1257,7 +1258,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
@ -1298,6 +1299,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,

	gpu_metrics->fan_pwm = metrics.FanPwm;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_0);
@ -278,15 +278,3 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)

	return ret;
}

void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
{
	memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));

	gpu_metrics->common_header.structure_size =
		sizeof(struct gpu_metrics_v2_0);
	gpu_metrics->common_header.format_revision = 2;
	gpu_metrics->common_header.content_revision = 0;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}
@ -746,3 +746,31 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,

	return ret;
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	default:
		return; /* unknown revision: leave the table untouched */
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
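
The new common helper replaces per-IP initializers such as the smu_v12_0_init_gpu_metrics_v2_0 deleted above: each caller passes its table plus the format/content revision, as renoir now does with smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0). A minimal sketch of a v1.0 user, not taken from this commit:

	/* Illustration only: poison the table to 0xFF, then stamp the
	 * v1.0 header revision and structure size.
	 */
	struct gpu_metrics_v1_0 my_metrics;

	smu_cmn_init_soft_gpu_metrics(&my_metrics, 1, 0);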
@ -97,5 +97,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache);

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);

#endif

@ -2478,6 +2478,9 @@ int radeon_asic_init(struct radeon_device *rdev)
	if (rdev->family == CHIP_HAINAN) {
		rdev->has_uvd = false;
		rdev->has_vce = false;
	} else if (rdev->family == CHIP_OLAND) {
		rdev->has_uvd = true;
		rdev->has_vce = false;
	} else {
		rdev->has_uvd = true;
		rdev->has_vce = true;
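
With has_vce now cleared for Oland, any bring-up path gated on the flag skips VCE entirely, which is what removes the confusing VCE messages on that chip. A sketch of the assumed shape of such a guard; the actual call sites are outside this diff:

	/* Assumed call-site shape, for illustration only. */
	if (rdev->has_vce) {
		r = radeon_vce_init(rdev);
		if (r)
			DRM_ERROR("failed VCE (%d) init.\n", r);
	}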
@ -68,7 +68,6 @@ int radeon_vce_init(struct radeon_device *rdev)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;
@ -187,7 +187,7 @@ static void rs690_mc_init(struct radeon_device *rdev)
	/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
	 * memory is present.
	 */
	if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
	if (!rdev->mc.igp_sideport_enabled && radeon_fastfb == 1) {
		DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
			 (unsigned long long)rdev->mc.aper_base, k8_addr);
		rdev->mc.aper_base = (resource_size_t)k8_addr;
@ -169,7 +169,6 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
		chip_id = 0x01000015;
		break;
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
		chip_id = 0x01000016;
		break;
	case CHIP_ARUBA: