Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Requested by danvet to get some i915 fixes back into drm-misc-next.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
commit c1b164a5f7
@@ -7,6 +7,7 @@ output bus.
 
 Required properties:
 - compatible: "mediatek,<chip>-dpi"
+  the supported chips are mt2701 , mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -207,10 +207,10 @@ DPIO
 CSR firmware support for DMC
 ----------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
    :doc: csr support for dmc
 
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
    :internal:
 
 Video BIOS Table (VBT)
@@ -332,7 +332,7 @@ This process is dubbed relocation.
 GEM BO Management Implementation Details
 ----------------------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
    :doc: Virtual Memory Address
 
 Buffer Object Eviction
@@ -382,7 +382,7 @@ Logical Rings, Logical Ring Contexts and Execlists
 Global GTT views
 ----------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
    :doc: Global GTT views
 
 .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -5629,7 +5629,7 @@ F:	include/uapi/drm/lima_drm.h
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR MEDIATEK
-M:	CK Hu <ck.hu@mediatek.com>
+M:	Chun-Kuang Hu <chunkuang.hu@kernel.org>
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
@@ -1,5 +1,6 @@
+# SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"
 	depends on DRM_AMDGPU
 
 config DRM_AMD_ACP
 	bool "Enable AMD Audio CoProcessor IP support"
@@ -994,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+			     uint32_t acc_flags);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 		dev_warn(adev->dev,
 			 "Invalid sdma engine id (%d), using engine id 0\n",
 			 engine_id);
-		/* fall through */
+		fallthrough;
 	case 0:
 		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
 				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -91,47 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
 	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
 	switch (hw_ip) {
-	case AMDGPU_HW_IP_GFX:
-		sched = &adev->gfx.gfx_ring[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_COMPUTE:
-		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-		scheds = adev->gfx.compute_prio_sched[hw_prio];
-		num_scheds = adev->gfx.num_compute_sched[hw_prio];
-		break;
-	case AMDGPU_HW_IP_DMA:
-		scheds = adev->sdma.sdma_sched;
-		num_scheds = adev->sdma.num_sdma_sched;
-		break;
-	case AMDGPU_HW_IP_UVD:
-		sched = &adev->uvd.inst[0].ring.sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCE:
-		sched = &adev->vce.ring[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_UVD_ENC:
-		sched = &adev->uvd.inst[0].ring_enc[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCN_DEC:
-		scheds = adev->vcn.vcn_dec_sched;
-		num_scheds = adev->vcn.num_vcn_dec_sched;
-		break;
-	case AMDGPU_HW_IP_VCN_ENC:
-		scheds = adev->vcn.vcn_enc_sched;
-		num_scheds = adev->vcn.num_vcn_enc_sched;
-		break;
-	case AMDGPU_HW_IP_VCN_JPEG:
-		scheds = adev->jpeg.jpeg_sched;
-		num_scheds = adev->jpeg.num_jpeg_sched;
-		break;
+	case AMDGPU_HW_IP_GFX:
+		sched = &adev->gfx.gfx_ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		break;
+	case AMDGPU_HW_IP_DMA:
+		scheds = adev->sdma.sdma_sched;
+		num_scheds = adev->sdma.num_sdma_sched;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		sched = &adev->uvd.inst[0].ring.sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		sched = &adev->vce.ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		sched = &adev->uvd.inst[0].ring_enc[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+					    adev->vcn.num_vcn_dec_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+					    adev->vcn.num_vcn_enc_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		scheds = adev->jpeg.jpeg_sched;
+		num_scheds = adev->jpeg.num_jpeg_sched;
+		break;
 	}
 
 	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -33,6 +33,7 @@
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
 
 /**
  * amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -178,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		} else {
 			r = get_user(value, (uint32_t *)buf);
 			if (!r)
-				WREG32(*pos >> 2, value);
+				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
 		}
 		if (r) {
 			result = r;
@@ -783,11 +784,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	ssize_t result = 0;
 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-	if (size & 3 || *pos & 3)
+	if (size > 4096 || size & 3 || *pos & 3)
 		return -EINVAL;
 
 	/* decode offset */
-	offset = *pos & GENMASK_ULL(11, 0);
+	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -825,7 +826,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;
 
-		value = data[offset++];
+		value = data[result >> 2];
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
 			result = r;
@@ -1294,7 +1295,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
 			amdgpu_debugfs_sclk_set, "%llu\n");
 
-extern void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	int r, i;
@@ -306,28 +306,10 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 		BUG();
 }
 
-/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- * @v: 32 bit value to write to the register
- * @acc_flags: access flags which require special behavior
- *
- * Writes the value specified to the offset specified.
- */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-		    uint32_t acc_flags)
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
-	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-		adev->last_mm_index = v;
-	}
-
-	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
-		return amdgpu_kiq_wreg(adev, reg, v);
-
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -344,6 +326,48 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 	}
 }
 
+/**
+ * amdgpu_mm_wreg - write to a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
+
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+		return amdgpu_kiq_wreg(adev, reg, v);
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
+
+/*
+ * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
+ *
+ * this function is invoked only the debugfs register access
+ * */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (amdgpu_sriov_fullaccess(adev) &&
+		adev->gfx.rlc.funcs &&
+		adev->gfx.rlc.funcs->is_rlcg_access_range) {
+
+		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+	}
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
+
 /**
  * amdgpu_io_rreg - read an IO register
  *
@@ -3933,6 +3957,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				if (r)
 					goto out;
 
+				amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
 				/* must succeed. */
 				amdgpu_ras_resume(tmp_adev);
@@ -4106,6 +4132,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 */
 		amdgpu_unregister_gpu_instance(tmp_adev);
 
+		amdgpu_fbdev_set_suspend(adev, 1);
+
 		/* disable ras on ALL IPs */
 		if (!(in_ras_intr && !use_baco) &&
 		      amdgpu_device_ip_need_full_reset(tmp_adev))
@@ -448,6 +448,8 @@ struct amdgpu_pm {
 	/* powerplay feature */
 	uint32_t pp_feature;
 
+	/* Used for I2C access to various EEPROMs on relevant ASICs */
+	struct i2c_adapter smu_i2c;
 };
 
 #define R600_SSTU_DFLT                               0
@@ -159,6 +159,10 @@ static int psp_sw_fini(void *handle)
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
 	adev->psp.asd_fw = NULL;
+	if (adev->psp.cap_fw) {
+		release_firmware(adev->psp.cap_fw);
+		adev->psp.cap_fw = NULL;
+	}
 	if (adev->psp.ta_fw) {
 		release_firmware(adev->psp.ta_fw);
 		adev->psp.ta_fw = NULL;
@@ -200,6 +204,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	int ret;
 	int index;
 	int timeout = 2000;
+	bool ras_intr = false;
 
 	mutex_lock(&psp->mutex);
@@ -224,7 +229,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		 * because gpu reset thread triggered and lock resource should
 		 * be released for psp resume sequence.
 		 */
-		if (amdgpu_ras_intr_triggered())
+		ras_intr = amdgpu_ras_intr_triggered();
+		if (ras_intr)
 			break;
 		msleep(1);
 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -237,14 +243,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	 * during psp initialization to avoid breaking hw_init and it doesn't
 	 * return -EINVAL.
 	 */
-	if (psp->cmd_buf_mem->resp.status || !timeout) {
+	if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
 		if (ucode)
 			DRM_WARN("failed to load ucode id (%d) ",
 				  ucode->ucode_id);
 		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
 			 psp->cmd_buf_mem->cmd_id,
 			 psp->cmd_buf_mem->resp.status);
-		if (!timeout) {
+		if ((ucode->ucode_id == AMDGPU_UCODE_ID_CAP) || !timeout) {
 			mutex_unlock(&psp->mutex);
 			return -EINVAL;
 		}
@@ -1186,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
 			   enum psp_gfx_fw_type *type)
 {
 	switch (ucode->ucode_id) {
+	case AMDGPU_UCODE_ID_CAP:
+		*type = GFX_FW_TYPE_CAP;
+		break;
 	case AMDGPU_UCODE_ID_SDMA0:
 		*type = GFX_FW_TYPE_SDMA0;
 		break;
@@ -252,6 +252,9 @@ struct psp_context
 	uint32_t			asd_ucode_size;
 	uint8_t				*asd_start_addr;
 
+	/* cap firmware */
+	const struct firmware		*cap_fw;
+
 	/* fence buffer */
 	struct amdgpu_bo		*fence_buf_bo;
 	uint64_t			fence_buf_mc_addr;
@@ -1116,7 +1116,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	struct ras_manager *obj, *tmp;
+	struct ras_manager *obj;
 	struct ras_fs_if fs_info;
 
 	/*
@@ -1128,10 +1128,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 
 	amdgpu_ras_debugfs_create_ctrl_node(adev);
 
-	list_for_each_entry_safe(obj, tmp, &con->head, node) {
-		if (!obj)
-			continue;
-
+	list_for_each_entry(obj, &con->head, node) {
 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
 		    (obj->attr_inuse == 1)) {
 			sprintf(fs_info.debugfs_name, "%s_err_inject",
@@ -1765,18 +1762,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 	*hw_supported = 0;
 	*supported = 0;
 
-	if (amdgpu_sriov_vf(adev) ||
+	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
 	    (adev->asic_type != CHIP_VEGA20 &&
 	     adev->asic_type != CHIP_ARCTURUS))
 		return;
 
-	if (adev->is_atom_fw &&
-	    (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
-	     amdgpu_atomfirmware_sram_ecc_supported(adev)))
-		*hw_supported = AMDGPU_RAS_BLOCK_MASK;
+	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+		DRM_INFO("HBM ECC is active.\n");
+		*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+				1 << AMDGPU_RAS_BLOCK__DF);
+	} else
+		DRM_INFO("HBM ECC is not presented.\n");
+
+	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+		DRM_INFO("SRAM ECC is active.\n");
+		*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+				1 << AMDGPU_RAS_BLOCK__DF);
+	} else
+		DRM_INFO("SRAM ECC is not presented.\n");
+
+	/* hw_supported needs to be aligned with RAS block mask. */
+	*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
 
 	*supported = amdgpu_ras_enable == 0 ?
-			0 : *hw_supported & amdgpu_ras_mask;
+		0 : *hw_supported & amdgpu_ras_mask;
 }
 
 int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -25,7 +25,6 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
 #include "atom.h"
 
 #define EEPROM_I2C_TARGET_ADDR_VEGA20		0xA0
@@ -124,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
 				 unsigned char *buff)
 {
 	int ret = 0;
+	struct amdgpu_device *adev = to_amdgpu_device(control);
 	struct i2c_msg msg = {
 			.addr	= 0,
 			.flags	= 0,
@@ -137,7 +137,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
 
 	msg.addr = control->i2c_address;
 
-	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
 	if (ret < 1)
 		DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
@@ -251,33 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
 		.buf	= buff,
 	};
 
+	/* Verify i2c adapter is initialized */
+	if (!adev->pm.smu_i2c.algo)
+		return -ENOENT;
+
 	if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
 		return -EINVAL;
 
 	mutex_init(&control->tbl_mutex);
 
-	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
-		break;
-
-	case CHIP_ARCTURUS:
-		ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
-		break;
-
-	default:
-		return 0;
-	}
-
-	if (ret) {
-		DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
-		return ret;
-	}
-
 	msg.addr = control->i2c_address;
 
 	/* Read/Create table header from EEPROM address 0 */
-	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
 	if (ret < 1) {
 		DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
 		return ret;
@@ -303,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
 	return ret == 1 ? 0 : -EIO;
 }
 
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
-	struct amdgpu_device *adev = to_amdgpu_device(control);
-
-	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
-		break;
-	case CHIP_ARCTURUS:
-		smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
-		break;
-
-	default:
-		return;
-	}
-}
-
 static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
 					  struct eeprom_table_record *record,
 					  unsigned char *buff)
@@ -476,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
 		control->next_addr += EEPROM_TABLE_RECORD_SIZE;
 	}
 
-	ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+	ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
 	if (ret < 1) {
 		DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
 
 struct amdgpu_ras_eeprom_control {
 	struct amdgpu_ras_eeprom_table_header tbl_hdr;
-	struct i2c_adapter eeprom_accessor;
 	uint32_t next_addr;
 	unsigned int num_recs;
 	struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
 }__attribute__((__packed__));
 
 int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
 int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
 
 int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
@@ -127,6 +127,8 @@ struct amdgpu_rlc_funcs {
 	void (*reset)(struct amdgpu_device *adev);
 	void (*start)(struct amdgpu_device *adev);
 	void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+	void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+	bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
 struct amdgpu_rlc {
@@ -283,7 +283,8 @@ union amdgpu_firmware_header {
  * fw loading support
  */
 enum AMDGPU_UCODE_ID {
-	AMDGPU_UCODE_ID_SDMA0 = 0,
+	AMDGPU_UCODE_ID_CAP = 0,	/* CAP must be the 1st fw to be loaded */
+	AMDGPU_UCODE_ID_SDMA0,
 	AMDGPU_UCODE_ID_SDMA1,
 	AMDGPU_UCODE_ID_SDMA2,
 	AMDGPU_UCODE_ID_SDMA3,
@@ -270,6 +270,9 @@ struct amdgpu_virt {
 #define amdgpu_sriov_runtime(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
 
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -1446,7 +1446,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 		uint64_t incr, entry_end, pe_start;
 		struct amdgpu_bo *pt;
 
-		if (flags & AMDGPU_PTE_VALID) {
+		if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 			/* make sure that the page tables covering the
 			 * address range are actually allocated
 			 */
@@ -1603,14 +1603,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		goto error_unlock;
 	}
 
-	if (flags & AMDGPU_PTE_VALID) {
+	if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 		struct amdgpu_bo *root = vm->root.base.bo;
 
 		if (!dma_fence_is_signaled(vm->last_direct))
 			amdgpu_bo_fence(root, vm->last_direct, true);
-
-		if (!dma_fence_is_signaled(vm->last_delayed))
-			amdgpu_bo_fence(root, vm->last_delayed, true);
 	}
 
 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -1718,7 +1715,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 			}
 
-		} else if (flags & AMDGPU_PTE_VALID) {
+		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 			addr += bo_adev->vm_manager.vram_base_offset;
 			addr += pfn << PAGE_SHIFT;
 		}
@@ -2588,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return false;
 
 	/* Don't evict VM page tables while they are updated */
-	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
-	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
 		amdgpu_vm_eviction_unlock(bo_base->vm);
 		return false;
 	}
@@ -2766,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 	if (timeout <= 0)
 		return timeout;
 
-	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
-	if (timeout <= 0)
-		return timeout;
-
-	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+	return dma_fence_wait_timeout(vm->last_direct, true, timeout);
 }
 
 /**
@@ -2843,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
 	vm->last_direct = dma_fence_get_stub();
-	vm->last_delayed = dma_fence_get_stub();
 
 	mutex_init(&vm->eviction_lock);
 	vm->evicting = false;
|
||||
|
||||
error_free_delayed:
|
||||
dma_fence_put(vm->last_direct);
|
||||
dma_fence_put(vm->last_delayed);
|
||||
drm_sched_entity_destroy(&vm->delayed);
|
||||
|
||||
error_free_direct:
|
||||
@ -3101,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
|
||||
dma_fence_wait(vm->last_direct, false);
|
||||
dma_fence_put(vm->last_direct);
|
||||
dma_fence_wait(vm->last_delayed, false);
|
||||
dma_fence_put(vm->last_delayed);
|
||||
|
||||
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
|
||||
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
|
||||
|
@@ -276,7 +276,6 @@ struct amdgpu_vm {
 
 	/* Last submission to the scheduler entities */
 	struct dma_fence	*last_direct;
-	struct dma_fence	*last_delayed;
 
 	unsigned int		pasid;
 	/* dedicated to vm */
@@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	tmp = dma_fence_get(f);
-	if (p->direct)
+	if (p->direct) {
+		tmp = dma_fence_get(f);
 		swap(p->vm->last_direct, tmp);
-	else
-		swap(p->vm->last_delayed, tmp);
-	dma_fence_put(tmp);
+		dma_fence_put(tmp);
+	} else {
+		dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+	}
 
 	if (fence && !p->direct)
 		swap(*fence, f);
@@ -224,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
 };
 
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+	static void *scratch_reg0;
+	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
+	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
+	uint32_t i = 0;
+	uint32_t retries = 50000;
+
+	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (amdgpu_sriov_runtime(adev)) {
+		pr_err("shouldn't call rlcg write register during runtime\n");
+		return;
+	}
+
+	writel(v, scratch_reg0);
+	writel(offset | 0x80000000, scratch_reg1);
+	writel(1, spare_int);
+	for (i = 0; i < retries; i++) {
+		u32 tmp;
+
+		tmp = readl(scratch_reg1);
+		if (!(tmp & 0x80000000))
+			break;
+
+		udelay(10);
+	}
+
+	if (i >= retries)
+		pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+}
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
 {
 	/* Pending on emulation bring up */
@@ -4247,6 +4290,33 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 	WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+					uint32_t offset,
+					struct soc15_reg_rlcg *entries, int arr_size)
+{
+	int i;
+	uint32_t reg;
+
+	if (!entries)
+		return false;
+
+	for (i = 0; i < arr_size; i++) {
+		const struct soc15_reg_rlcg *entry;
+
+		entry = &entries[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+		if (offset == reg)
+			return true;
+	}
+
+	return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+	return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
 	.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
 	.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4258,7 +4328,9 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
 	.stop = gfx_v10_0_rlc_stop,
 	.reset = gfx_v10_0_rlc_reset,
 	.start = gfx_v10_0_rlc_start,
-	.update_spm_vmid = gfx_v10_0_update_spm_vmid
+	.update_spm_vmid = gfx_v10_0_update_spm_vmid,
+	.rlcg_wreg = gfx_v10_rlcg_wreg,
+	.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
 static int gfx_v10_0_set_powergating_state(void *handle,
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
 };
 
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
 {
 	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
 	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+	static void *scratch_reg0;
+	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
+	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
+
+	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (amdgpu_sriov_runtime(adev)) {
+		pr_err("shouldn't call rlcg write register during runtime\n");
+		return;
+	}
+
+	if (offset == grbm_cntl || offset == grbm_idx) {
+		if (offset == grbm_cntl)
+			writel(v, scratch_reg2);
+		else if (offset == grbm_idx)
+			writel(v, scratch_reg3);
+
+		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+	} else {
+		uint32_t i = 0;
+		uint32_t retries = 50000;
+
+		writel(v, scratch_reg0);
+		writel(offset | 0x80000000, scratch_reg1);
+		writel(1, spare_int);
+		for (i = 0; i < retries; i++) {
+			u32 tmp;
+
+			tmp = readl(scratch_reg1);
+			if (!(tmp & 0x80000000))
+				break;
+
+			udelay(10);
+		}
+		if (i >= retries)
+			pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+	}
+
+}
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -1921,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 
 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1933,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
 			   uint32_t wave, uint32_t thread,
 			   uint32_t regno, uint32_t num, uint32_t *out)
 {
-	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -4128,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] =
 	0xbe800080, 0xbf810000,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+	0xbf84fff8, 0xbf810000,
+};
+
 /* When below register arrays changed, please update gpr_reg_size,
   and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
   to cover all gfx9 ASICs */
@@ -4148,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x81 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
 static const struct soc15_reg_entry sgpr1_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4278,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		adev->gfx.config.max_cu_per_sh *
 		adev->gfx.config.max_sh_per_se;
 	int sgpr_work_group_size = 5;
-	int gpr_reg_size = compute_dim_x / 16 + 6;
+	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+	int vgpr_init_shader_size;
+	const u32 *vgpr_init_shader_ptr;
+	const struct soc15_reg_entry *vgpr_init_regs_ptr;
 
 	/* only support when RAS is enabled */
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4288,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	if (!ring->sched.ready)
 		return 0;
 
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+	} else {
+		vgpr_init_shader_ptr = vgpr_init_compute_shader;
+		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+		vgpr_init_regs_ptr = vgpr_init_regs;
+	}
+
 	total_size =
 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
 	total_size +=
@@ -4296,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
 	total_size = ALIGN(total_size, 256);
 	vgpr_offset = total_size;
-	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+	total_size += ALIGN(vgpr_init_shader_size, 256);
 	sgpr_offset = total_size;
 	total_size += sizeof(sgpr_init_compute_shader);
@@ -4309,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	}
 
 	/* load the compute shaders */
-	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
-		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
 
 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
|
||||
/* write the register state for the compute dispatch */
|
||||
for (i = 0; i < gpr_reg_size; i++) {
|
||||
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
|
||||
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
|
||||
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
|
||||
- PACKET3_SET_SH_REG_START;
|
||||
ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
|
||||
ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
|
||||
}
|
||||
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
|
||||
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
|
||||
@ -4336,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
|
||||
|
||||
/* write dispatch packet */
|
||||
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
|
||||
ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
|
||||
ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
|
||||
ib.ptr[ib.length_dw++] = 1; /* y */
|
||||
ib.ptr[ib.length_dw++] = 1; /* z */
|
||||
ib.ptr[ib.length_dw++] =
|
||||
@ -4783,6 +4966,35 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
|
||||
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
|
||||
}
|
||||
|
||||
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
|
||||
uint32_t offset,
|
||||
struct soc15_reg_rlcg *entries, int arr_size)
|
||||
{
|
||||
int i;
|
||||
uint32_t reg;
|
||||
|
||||
if (!entries)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < arr_size; i++) {
|
||||
const struct soc15_reg_rlcg *entry;
|
||||
|
||||
entry = &entries[i];
|
||||
reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
|
||||
if (offset == reg)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
|
||||
{
|
||||
return gfx_v9_0_check_rlcg_range(adev, offset,
|
||||
(void *)rlcg_access_gc_9_0,
|
||||
ARRAY_SIZE(rlcg_access_gc_9_0));
|
||||
}
|
||||
|
||||
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
|
||||
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
|
||||
.set_safe_mode = gfx_v9_0_set_safe_mode,
|
||||
@@ -4795,7 +5007,9 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
 	.stop = gfx_v9_0_rlc_stop,
 	.reset = gfx_v9_0_rlc_reset,
 	.start = gfx_v9_0_rlc_start,
-	.update_spm_vmid = gfx_v9_0_update_spm_vmid
+	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
+	.rlcg_wreg = gfx_v9_0_rlcg_wreg,
+	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
 };
 
 static int gfx_v9_0_set_powergating_state(void *handle,
@@ -6306,6 +6520,9 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
 	/* read back registers to clear the counters */
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -897,6 +897,9 @@ void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
 		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
@@ -922,30 +922,20 @@ static int gmc_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 	/* Check if ecc is available */
-	if (!amdgpu_sriov_vf(adev)) {
-		switch (adev->asic_type) {
-		case CHIP_VEGA10:
-		case CHIP_VEGA20:
-		case CHIP_ARCTURUS:
-			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
-			if (!r) {
-				DRM_INFO("ECC is not present.\n");
-				if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
-					adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
-			} else {
-				DRM_INFO("ECC is active.\n");
-			}
+	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+		r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+		if (!r) {
+			DRM_INFO("ECC is not present.\n");
+			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+		} else
+			DRM_INFO("ECC is active.\n");
 
-			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
-			if (!r) {
-				DRM_INFO("SRAM ECC is not present.\n");
-			} else {
-				DRM_INFO("SRAM ECC is active.\n");
-			}
-			break;
-		default:
-			break;
-		}
+		r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+		if (!r)
+			DRM_INFO("SRAM ECC is not present.\n");
+		else
+			DRM_INFO("SRAM ECC is active.\n");
 	}
 
 	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE);
 
 	if (enable) {
-		if (jpeg_v2_0_is_idle(handle))
+		if (!jpeg_v2_0_is_idle(handle))
 			return -EBUSY;
 		jpeg_v2_0_enable_clock_gating(adev);
 	} else {
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
 			continue;
 
 		if (enable) {
-			if (jpeg_v2_5_is_idle(handle))
+			if (!jpeg_v2_5_is_idle(handle))
 				return -EBUSY;
 			jpeg_v2_5_enable_clock_gating(adev, i);
 		} else {
@@ -246,6 +246,7 @@ enum psp_gfx_fw_type {
 	GFX_FW_TYPE_SDMA6       = 56,   /* SDMA6 MI  */
 	GFX_FW_TYPE_SDMA7       = 57,   /* SDMA7 MI  */
 	GFX_FW_TYPE_VCN1        = 58,   /* VCN1 MI   */
+	GFX_FW_TYPE_CAP         = 62,   /* CAP_FW VG */
 	GFX_FW_TYPE_MAX
 };
@@ -26,6 +26,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
 #include "amdgpu_ucode.h"
 #include "soc15_common.h"
 #include "psp_v11_0.h"
@@ -868,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
 	if (ret)
 		return -EINVAL;
 
+	/* If err_event_athub occurs error inject was successful, however
+	   return status from TA is no long reliable */
+	if (amdgpu_ras_intr_triggered())
+		return 0;
+
 	return ras_cmd->ras_status;
 }
||||
|
@ -44,6 +44,7 @@
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
|
||||
|
||||
@@ -63,6 +64,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
 	char fw_name[30];
 	int err = 0;
 	const struct psp_firmware_header_v1_0 *hdr;
+	struct amdgpu_firmware_info *info = NULL;
 
 	DRM_DEBUG("\n");
@@ -112,6 +114,26 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
 	adev->psp.asd_start_addr = (uint8_t *)hdr +
 				le32_to_cpu(hdr->header.ucode_array_offset_bytes);
 
+	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_VEGA10) {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin",
+				chip_name);
+		err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
+		if (err)
+			goto out;
+
+		err = amdgpu_ucode_validate(adev->psp.cap_fw);
+		if (err)
+			goto out;
+
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+		info->ucode_id = AMDGPU_UCODE_ID_CAP;
+		info->fw = adev->psp.cap_fw;
+		hdr = (const struct psp_firmware_header_v1_0 *)
+			adev->psp.cap_fw->data;
+		adev->firmware.fw_size += ALIGN(
+			le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
+	}
+
 	return 0;
 out:
 	if (err) {
@@ -122,6 +144,8 @@ out:
 		adev->psp.sos_fw = NULL;
 		release_firmware(adev->psp.asd_fw);
 		adev->psp.asd_fw = NULL;
+		release_firmware(adev->psp.cap_fw);
+		adev->psp.cap_fw = NULL;
 	}
 
 	return err;
@@ -46,8 +46,7 @@
 #define I2C_NO_STOP	1
 #define I2C_RESTART	2
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
 
 static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
 {
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
 
 static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!smu_v11_0_i2c_bus_lock(i2c)) {
 		DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
 
 static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!smu_v11_0_i2c_bus_unlock(i2c)) {
 		DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
 			      struct i2c_msg *msgs, int num)
 {
 	int i, ret;
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!control->bus_locked) {
 		DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
 	control->class = I2C_CLASS_SPD;
 	control->dev.parent = &adev->pdev->dev;
 	control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
-	snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+	snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
 	control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
 
 	res = i2c_add_adapter(control);
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
 	u32 or_mask;
 };
 
+struct soc15_reg_rlcg {
+	u32 hwip;
+	u32 instance;
+	u32 segment;
+	u32 reg;
+};
+
 struct soc15_reg_entry {
 	uint32_t hwip;
 	uint32_t inst;
@@ -70,10 +70,9 @@
 	} \
 } while (0)
 
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
 #define WREG32_RLC(reg, value) \
 	do { \
-		if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+		if (amdgpu_sriov_fullaccess(adev)) { \
 			uint32_t i = 0; \
 			uint32_t retries = 50000; \
 			uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -98,7 +97,7 @@
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
 	do { \
 		uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
-		if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+		if (amdgpu_sriov_fullaccess(adev)) { \
 			uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
 			uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
 			uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 
 	if (enable) {
 		/* wait for STATUS to clear */
-		if (vcn_v1_0_is_idle(handle))
+		if (!vcn_v1_0_is_idle(handle))
 			return -EBUSY;
 		vcn_v1_0_enable_clock_gating(adev);
 	} else {
@@ -1251,7 +1251,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 
 	if (enable) {
 		/* wait for STATUS to clear */
-		if (vcn_v2_0_is_idle(handle))
+		if (!vcn_v2_0_is_idle(handle))
 			return -EBUSY;
 		vcn_v2_0_enable_clock_gating(adev);
 	} else {
@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
|
||||
static int vcn_v2_5_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
if (adev->asic_type == CHIP_ARCTURUS) {
|
||||
u32 harvest;
|
||||
int i;
|
||||
|
||||
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
|
||||
harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
|
||||
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
|
||||
adev->vcn.harvest_config |= 1 << i;
|
||||
}
|
||||
|
||||
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
|
||||
AMDGPU_VCN_HARVEST_VCN1))
|
||||
/* both instances are harvested, disable the block */
|
||||
return -ENOENT;
|
||||
} else
|
||||
adev->vcn.num_vcn_inst = 1;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
adev->vcn.num_vcn_inst = 2;
|
||||
adev->vcn.harvest_config = 0;
|
||||
adev->vcn.num_enc_rings = 1;
|
||||
} else {
|
||||
if (adev->asic_type == CHIP_ARCTURUS) {
|
||||
u32 harvest;
|
||||
int i;
|
||||
|
||||
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
|
||||
harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
|
||||
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
|
||||
adev->vcn.harvest_config |= 1 << i;
|
||||
}
|
||||
|
||||
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
|
||||
AMDGPU_VCN_HARVEST_VCN1))
|
||||
/* both instances are harvested, disable the block */
|
||||
return -ENOENT;
|
||||
} else
|
||||
adev->vcn.num_vcn_inst = 1;
|
||||
|
||||
adev->vcn.num_enc_rings = 2;
|
||||
}
|
||||
|
||||
@ -1672,7 +1673,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
|
||||
return 0;
|
||||
|
||||
if (enable) {
|
||||
if (vcn_v2_5_is_idle(handle))
|
||||
if (!vcn_v2_5_is_idle(handle))
|
||||
return -EBUSY;
|
||||
vcn_v2_5_enable_clock_gating(adev);
|
||||
} else {
|
||||
|
@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
|
||||
{
|
||||
/*
|
||||
* node id couldn't be 0 - the three MSB bits of
|
||||
* aperture shoudn't be 0
|
||||
* aperture shouldn't be 0
|
||||
*/
|
||||
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
|
||||
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
|
||||
|
@ -132,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
|
||||
/* removes and deallocates the drm structures, created by the above function */
|
||||
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
|
||||
|
||||
static void
|
||||
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
|
||||
|
||||
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
|
||||
struct drm_plane *plane,
|
||||
unsigned long possible_crtcs,
|
||||
@ -410,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
|
||||
if (acrtc) {
|
||||
acrtc_state = to_dm_crtc_state(acrtc->base.state);
|
||||
|
||||
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
|
||||
acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
|
||||
/* Core vblank handling is done here after end of front-porch in
|
||||
* vrr mode, as vblank timestamping will give valid results
|
||||
@ -461,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
|
||||
if (acrtc) {
|
||||
acrtc_state = to_dm_crtc_state(acrtc->base.state);
|
||||
|
||||
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
|
||||
acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
|
||||
/* Core vblank handling at start of front-porch is only possible
|
||||
* in non-vrr mode, as only there vblank timestamping will give
|
||||
@ -525,8 +524,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
|
||||
|
||||
acrtc_state = to_dm_crtc_state(acrtc->base.state);
|
||||
|
||||
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
|
||||
amdgpu_dm_vrr_active(acrtc_state));
|
||||
|
||||
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
|
||||
drm_crtc_handle_vblank(&acrtc->base);
|
||||
@ -1895,8 +1894,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
|
||||
caps->aux_min_input_signal = min;
|
||||
}
|
||||
|
||||
static void
|
||||
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
|
||||
void amdgpu_dm_update_connector_after_detect(
|
||||
struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
struct drm_device *dev = connector->dev;
|
||||
@ -2225,10 +2224,10 @@ static void handle_hpd_rx_irq(void *param)
|
||||
}
|
||||
}
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
|
||||
if (adev->dm.hdcp_workqueue)
|
||||
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
|
||||
}
|
||||
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
|
||||
if (adev->dm.hdcp_workqueue)
|
||||
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
|
||||
}
|
||||
#endif
|
||||
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
|
||||
(dc_link->type == dc_connection_mst_branch))
|
||||
@ -3023,6 +3022,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
||||
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
|
||||
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
|
||||
|
||||
/* No userspace support. */
|
||||
dm->dc->debug.disable_tri_buf = true;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
kfree(aencoder);
|
||||
@ -4314,9 +4316,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
||||
struct dmcu *dmcu = core_dc->res_pool->dmcu;
|
||||
|
||||
stream->psr_version = dmcu->dmcu_version.psr_version;
|
||||
mod_build_vsc_infopacket(stream,
|
||||
&stream->vsc_infopacket,
|
||||
&stream->use_vsc_sdp_for_colorimetry);
|
||||
|
||||
//
|
||||
// should decide stream support vsc sdp colorimetry capability
|
||||
// before building vsc info packet
|
||||
//
|
||||
stream->use_vsc_sdp_for_colorimetry = false;
|
||||
if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
|
||||
stream->use_vsc_sdp_for_colorimetry =
|
||||
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
|
||||
} else {
|
||||
if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
|
||||
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
|
||||
stream->use_vsc_sdp_for_colorimetry = true;
|
||||
}
|
||||
}
|
||||
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
|
||||
}
|
||||
}
|
||||
finish:
|
||||
@ -6538,7 +6553,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
uint32_t target_vblank, last_flip_vblank;
|
||||
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
|
||||
bool pflip_present = false;
|
||||
bool swizzle = true;
|
||||
struct {
|
||||
struct dc_surface_update surface_updates[MAX_SURFACES];
|
||||
struct dc_plane_info plane_infos[MAX_SURFACES];
|
||||
@ -6584,9 +6598,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
|
||||
dc_plane = dm_new_plane_state->dc_state;
|
||||
|
||||
if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
|
||||
swizzle = false;
|
||||
|
||||
bundle->surface_updates[planes_count].surface = dc_plane;
|
||||
if (new_pcrtc_state->color_mgmt_changed) {
|
||||
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
|
||||
@ -6795,8 +6806,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
amdgpu_dm_link_setup_psr(acrtc_state->stream);
|
||||
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
|
||||
acrtc_state->stream->link->psr_feature_enabled &&
|
||||
!acrtc_state->stream->link->psr_allow_active &&
|
||||
swizzle) {
|
||||
!acrtc_state->stream->link->psr_allow_active) {
|
||||
amdgpu_dm_psr_enable(acrtc_state->stream);
|
||||
}
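
/* Editor's sketch: the MST/SST decision in the create_stream_for_sink hunk
 * above, condensed into a hypothetical helper (not part of amdgpu_dm) to make
 * the two paths explicit: MST sinks report the capability through
 * is_vsc_sdp_colorimetry_supported, while SST sinks are checked for DPCD 1.4+
 * plus the VSC_SDP_COLORIMETRY_SUPPORTED DPRX feature bit.
 */
static bool stream_supports_vsc_sdp_colorimetry(const struct amdgpu_dm_connector *aconnector,
						const struct dc_stream_state *stream)
{
	if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;

	return stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
	       stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
}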

@ -483,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

#endif /* __AMDGPU_DM_H__ */

@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);

/* connector->display_info will be parsed from EDID and saved
* into drm_connector->display_info from edid by call stack
* below:
* drm_parse_ycbcr420_deep_color_info
* drm_parse_hdmi_forum_vsdb
* drm_parse_cea_ext
* drm_add_display_info
* drm_connector_update_edid_property
*
* drm_connector->display_info will be used by amdgpu_dm funcs,
* like fill_stream_properties_from_drm_display_mode
*/
amdgpu_dm_update_connector_after_detect(aconnector);

edid_status = dm_helpers_parse_edid_caps(
ctx,
&sink->dc_edid,

@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
/* fall through */
fallthrough;
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
/* fall through */
fallthrough;
default:
return NULL;
}

@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];

if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
}

@ -1365,7 +1365,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;

if ((!dc->clk_optimized_required && !dc->wm_optimized_required) || dc->optimize_seamless_boot_streams > 0)
if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true;

post_surface_trace(dc);
@ -1379,7 +1379,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)

dc->hwss.optimize_bandwidth(dc, context);

dc->clk_optimized_required = false;
dc->optimized_required = false;
dc->wm_optimized_required = false;

return true;
@ -1828,11 +1828,12 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// If there's an available clock comparator, we use that.
if (dc->clk_mgr->funcs->are_clock_states_equal) {
if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
dc->clk_optimized_required = true;
dc->optimized_required = true;
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->clk_optimized_required = true;
}
dc->optimized_required = true;
} else if (dc->wm_optimized_required)
dc->optimized_required = true;
}

return type;
@ -1871,6 +1872,8 @@ static void copy_surface_update_to_plane(
surface->time.index++;
if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
surface->time.index = 0;

surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
}

if (srf_update->scaling_info) {
@ -2202,7 +2205,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->optimize_seamless_boot_streams--;

if (dc->optimize_seamless_boot_streams == 0)
dc->clk_optimized_required = true;
dc->optimized_required = true;
}
}

@ -3069,14 +3069,9 @@ void core_link_enable_stream(

if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal)) {
/* Here we only need to enable DSC on RX. DSC HW programming
* was done earlier, as part of timing programming.
*/
dp_set_dsc_on_rx(pipe_ctx, true);
}
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
}

dc->hwss.enable_stream(pipe_ctx);

/* Set DPS PPS SDP (AKA "info frames") */
@ -3103,7 +3098,7 @@ void core_link_enable_stream(
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_on_rx(pipe_ctx, true);
dp_set_dsc_enable(pipe_ctx, true);

}
}

@ -2674,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}

test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
if (dpcd_test_params.bits.CLR_FORMAT == 0)
test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
else
test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

dc_link_dp_set_test_pattern(
link,
@ -3438,6 +3441,17 @@ static bool retrieve_link_cap(struct dc_link *link)
sink_id.ieee_device_id,
sizeof(sink_id.ieee_device_id));

/* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
{
uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };

if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
!memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
sizeof(str_mbp_2017))) {
link->reported_link_cap.link_rate = 0x0c;
}
}
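
/* Editor's note: a minimal sketch of the DPCD link-rate encoding the quirk
 * above relies on; the helper is hypothetical, not dc code. DP_MAX_LINK_RATE
 * counts in units of 0.27 Gbps per lane, so the 0x0c the quirk writes caps
 * the reported rate at 3.24 Gbps (RBR2) instead of the panel's bogus value.
 */
static unsigned int dp_link_rate_code_to_mbps(unsigned char code)
{
	/* 0x06 -> 1620 (RBR), 0x0a -> 2700 (HBR), 0x0c -> 3240 (RBR2) */
	return (unsigned int)code * 270;
}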

core_link_read_dpcd(
link,
DP_SINK_HW_REVISION_START,

@ -394,7 +394,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
DC_LOG_DSC("\tslice_width %d", config->slice_width);
}

bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;

@ -893,6 +893,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
int odm_idx = 0;

/*
* Need to calculate the scan direction for viewport to make adjustments
@ -924,11 +925,13 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
/*modified recout_skip_h calculation due to odm having no recout offset caused by split*/
/*modified recout_skip_h calculation due to odm having no recout offset*/
while (odm_pipe) {
recout_skip_h += odm_pipe->plane_res.scl_data.recout.width + odm_pipe->plane_res.scl_data.recout.x;
odm_idx++;
odm_pipe = odm_pipe->prev_odm_pipe;
}
if (odm_idx)
recout_skip_h += odm_idx * data->recout.width;

recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -

@ -521,7 +521,7 @@ struct dc {
struct dce_hwseq *hwseq;

/* Require to optimize clocks and bandwidth for added/removed planes */
bool clk_optimized_required;
bool optimized_required;
bool wm_optimized_required;

/* Require to maintain clocks and bandwidth for UEFI enabled HW */
@ -872,6 +872,7 @@ struct dc_flip_addrs {
unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
bool triplebuffer_flips;
};

bool dc_post_update_surfaces_to_stream(
@ -1046,6 +1047,8 @@ struct dc_sink {
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;

bool is_vsc_sdp_colorimetry_supported;

/* private to DC core */
struct dc_link *link;
struct dc_context *ctx;

@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
/* fall through */
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;

@ -267,6 +267,9 @@ static void set_speed(
uint32_t xtal_ref_div = 0;
uint32_t prescale = 0;

if (speed == 0)
return;

REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);

if (xtal_ref_div == 0)
@ -274,17 +277,15 @@ static void set_speed(

prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;

if (speed) {
if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
REG_UPDATE_N(SPEED, 3,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
else
REG_UPDATE_N(SPEED, 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
REG_UPDATE_N(SPEED, 3,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
else
REG_UPDATE_N(SPEED, 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
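
/* Editor's sketch (illustrative, not dc code): the speed == 0 guard added in
 * set_speed above protects the integer prescale computation from dividing by
 * zero. With a hypothetical 48000 kHz reference, xtal_ref_div 2 and a 100 kHz
 * target, prescale = ((48000 * 2) / 2) / 100 = 480.
 */
static unsigned int i2c_prescale_sketch(unsigned int ref_khz, unsigned int xtal_ref_div,
					unsigned int speed_khz)
{
	if (speed_khz == 0 || xtal_ref_div == 0)
		return 0; /* caller must skip programming in this case */
	return ((ref_khz * 2) / xtal_ref_div) / speed_khz;
}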

static bool setup_engine(

@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
/* fall through */
fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;

@ -134,11 +134,9 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
int i = 0;

for (i = 0; i < MAX_PIPES; i++) {
if (res_ctx &&
res_ctx->pipe_ctx[i].stream &&
res_ctx->pipe_ctx[i].stream->link &&
res_ctx->pipe_ctx[i].stream->link == link &&
res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
if (res_ctx->pipe_ctx[i].stream &&
res_ctx->pipe_ctx[i].stream->link == link &&
res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}

@ -1048,7 +1048,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (opp != NULL)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

dc->clk_optimized_required = true;
dc->optimized_required = true;

if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
@ -1099,7 +1099,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
false);

hubp->power_gated = true;
dc->clk_optimized_required = false; /* We're powering off, no need to optimize */
dc->optimized_required = false; /* We're powering off, no need to optimize */

hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
@ -1356,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
hws->funcs.init_pipes(dc, dc->current_state);
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}

for (i = 0; i < res_pool->audio_count; i++) {

@ -62,11 +62,11 @@
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)


#define LE_DCN10_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_DCN_COMMON_REG_LIST(id)

struct dcn10_link_enc_aux_registers {

@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = {

static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};

static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,

@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}

dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}

void dccg2_get_dccg_ref_freq(struct dccg *dccg,

@ -623,13 +623,6 @@ enum dc_status dcn20_enable_stream_timing(

/* TODO check if timing_changed, disable stream if timing changed */

/* Have to setup DSC here to make sure the bandwidth sent to DIG BE won't be bigger than
* what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag will be automatically
* set at a later time when the video is enabled (DP_VID_STREAM_EN = 1).
*/
if (pipe_ctx->stream->timing.flags.DSC)
dp_set_dsc_on_stream(pipe_ctx, true);

for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
opp_cnt++;
@ -654,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}

if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,

@ -111,7 +111,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
.dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,

@ -1041,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = {
};

static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@ -1060,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};

static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@ -1671,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}

static void release_dsc(struct resource_context *res_ctx,
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@ -1731,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];

if (pipe_ctx->stream_res.dsc)
release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}

@ -2502,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}

void dcn20_merge_pipes_for_validate(
static void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
@ -2527,7 +2527,7 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@ -2565,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
bool *split)
bool *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;

/* Single display loop, exits if there is more than one display */
if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
avoid_split = true;
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;

/* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;

if (!pipe->stream || pipe->top_pipe)
continue;

if (dc->debug.force_single_disp_pipe_split) {
if (!force_split)
force_split = true;
else {
force_split = false;
exit_loop = true;
}
}
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
if (avoid_split)
avoid_split = false;
else {
avoid_split = true;
exit_loop = true;
}
}
if (exit_loop)
break;
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
}
/* TODO: fix dc bugs and remove this split threshold thing */
if (context->stream_count > dc->res_pool->pipe_count / 2)
if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;

/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@ -2622,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags(
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];

if (!context->res_ctx.pipe_ctx[i].stream)
continue;

if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@ -2639,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];

if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
/*Already split odm pipe tree, don't try to split again*/
split[i] = false;
split[pipe->prev_odm_pipe->pipe_idx] = false;
} else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
/*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
split[i] = false;
split[pipe->top_pipe->pipe_idx] = false;
} else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
if (split[i] == false) {
/*Exiting mpc/odm combine*/
merge[i] = true;
if (pipe->prev_odm_pipe) {
ASSERT(0); /*should not actually happen yet*/
merge[pipe->prev_odm_pipe->pipe_idx] = true;
} else
merge[pipe->top_pipe->pipe_idx] = true;
} else {
/*Transition from mpc combine to odm combine or vice versa*/
ASSERT(0); /*should not actually happen yet*/
split[i] = true;
merge[i] = true;
if (pipe->prev_odm_pipe) {
split[pipe->prev_odm_pipe->pipe_idx] = true;
merge[pipe->prev_odm_pipe->pipe_idx] = true;
} else {
split[pipe->top_pipe->pipe_idx] = true;
merge[pipe->top_pipe->pipe_idx] = true;
}
}
}

/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
@ -2684,7 +2707,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;

vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);

/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)

@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
bool *split);
bool *split,
bool *merge);
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,

@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
true);
}

/* If user hotplug a HDMI monitor while in monitor off,
* OS will do a mode set (with output timing) but keep output off.
* In this case DAL will ask vbios to power up the pll in the PHY.
* If user unplug the monitor (while we are on monitor off) or
* system attempt to enter modern standby (which we will disable PLL),
* PHY will hang on the next mode set attempt.
* if enable PLL follow by disable PLL (without executing lane enable/disable),
* RDPCS_PHY_DP_MPLLB_STATE remains 1,
* which indicate that PLL disable attempt actually didn’t go through.
* As a workaround, insert PHY lane enable/disable before PLL disable.
*/
void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (!pipe_ctx->stream->dpms_off)
return;

pipe_ctx->stream->dpms_off = false;
core_link_enable_stream(context, pipe_ctx);
core_link_disable_stream(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}

@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context);

void dcn21_PLAT_58856_wa(struct dc_state *context,
struct pipe_ctx *pipe_ctx);

#endif /* __DC_HWSS_DCN21_H__ */

@ -119,7 +119,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
.dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
@ -131,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.PLAT_58856_wa = dcn21_PLAT_58856_wa,
};

void dcn21_hw_sequencer_construct(struct dc *dc)

@ -855,7 +855,7 @@ static const struct dc_plane_cap plane_cap = {
};

static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@ -876,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};

static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@ -1864,7 +1864,7 @@ static bool dcn21_resource_construct(
goto create_fail;
}

if (dc->config.psr_on_dmub) {
if (dc->debug.disable_dmcu) {
pool->base.psr = dmub_psr_create(ctx);

if (pool->base.psr == NULL) {

@ -85,7 +85,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);

#endif /* __DC_LINK_DP_H__ */

@ -27,11 +27,12 @@
#define __DAL_DCCG_H__

#include "dc_types.h"
#include "hw_shared.h"

struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;

int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
};

@ -145,6 +145,8 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
};

struct dce_hwseq {

@ -104,7 +104,7 @@ enum dmub_window_id {
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_RESERVED,
DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_TOTAL,
};

@ -316,6 +316,7 @@ struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;

/* private: internal use only */

@ -52,8 +52,11 @@
/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */

#define DMUB_CW0_BASE (0x60000000)
@ -211,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@ -256,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
fw_state->base = dmub_align(trace_buff->top, 256);
fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);

out->fb_size = dmub_align(fw_state->top, 4096);
scratch_mem->base = dmub_align(fw_state->top, 256);
scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);

out->fb_size = dmub_align(scratch_mem->top, 4096);

return DMUB_STATUS_OK;
}
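
/* Editor's sketch: the dmub_align() used above is assumed to be the usual
 * round-up-to-multiple helper, which is how each window (mailbox, trace
 * buffer, fw state, and the new scratch region) is packed end to end before
 * fb_size is rounded to a 4 KiB boundary.
 */
static inline unsigned int dmub_align_sketch(unsigned int val, unsigned int factor)
{
	return (val + factor - 1) / factor * factor;
}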

@ -334,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];

struct dmub_rb_init_params rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@ -370,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset(dmub);

if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
fw_state_fb) {
fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
cw2.region.top = cw2.region.base + data_fb->size;
@ -396,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,

dmub->fw_state = fw_state_fb->cpu_addr;

dmub->scratch_mem_fb = *scratch_mem_fb;

if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
&cw5, &cw6);

@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
status = mod_hdcp_add_display_to_topology(hdcp, display->index);
status = mod_hdcp_add_display_to_topology(hdcp, display_container);

if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;

@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
display->state = MOD_HDCP_DISPLAY_INACTIVE;
memset(display, 0, sizeof(struct mod_hdcp_display));

/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)

@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,

/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
struct mod_hdcp *hdcp, uint8_t index);
struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@ -503,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}

static inline uint8_t is_display_added(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
}

static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@ -515,34 +510,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis

static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
uint8_t added_count = 0;
uint8_t active_count = 0;
uint8_t i;

for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
added_count++;
return added_count;
active_count++;
return active_count;
}

static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
{
uint8_t added_count = 0;
uint8_t i;

for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_added(&hdcp->displays[i]))
added_count++;
return added_count;
}

static inline struct mod_hdcp_display *get_first_added_display(
static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;

for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_added(&hdcp->displays[i])) {
if (is_display_active(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}

@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}

@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}

@ -408,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;

if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@ -426,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;

if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@ -444,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;

if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@ -462,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;

if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
@ -484,7 +484,7 @@ enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
uint32_t rx_id_list_size = 0;
uint32_t bytes_read = 0;

hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list+1,
HDCP_MAX_AUX_TRANSACTION_SIZE);
@ -511,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;

if (is_dp_hdcp(hdcp)) {
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);

@ -54,7 +54,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;

if (!display || !is_display_added(display))
if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;

memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@ -76,22 +76,18 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
}

enum mod_hdcp_status mod_hdcp_add_display_to_topology(
struct mod_hdcp *hdcp, uint8_t index)
struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;

if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}

if (!display || is_display_added(display))
return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;

memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@ -113,20 +109,21 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(

psp_dtm_invoke(psp, dtm_cmd->cmd_id);

if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
}

display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);

return MOD_HDCP_STATUS_SUCCESS;

return MOD_HDCP_STATUS_SUCCESS;
}

enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{

struct psp_context *psp = hdcp->config.psp.handle;
struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;

if (!psp->hdcp_context.hdcp_initialized) {
@ -179,7 +176,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
MOD_HDCP_DISPLAY_ACTIVE;
HDCP_HDCP1_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@ -231,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct mod_hdcp_display *display = get_first_active_display(hdcp);

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@ -301,8 +298,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp

for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {

if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
hdcp->displays[i].adjust.disable)
if (hdcp->displays[i].adjust.disable)
continue;

memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@ -364,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct mod_hdcp_display *display = get_first_active_display(hdcp);

if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@ -423,7 +419,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
MOD_HDCP_DISPLAY_ACTIVE;
HDCP_HDCP2_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@ -662,7 +658,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct mod_hdcp_display *display = get_first_active_display(hdcp);

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@ -747,8 +743,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp


for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
hdcp->displays[i].adjust.disable)
if (hdcp->displays[i].adjust.disable)
continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;

@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
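
/* Editor's note: the inline helpers earlier in this diff compare display
 * state with >=, so dropping MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED is only safe
 * because the remaining values keep the INACTIVE < ACTIVE <
 * ENCRYPTION_ENABLED progression; a sketch of the pattern:
 */
static inline int state_is_at_least_active(enum mod_hdcp_display_state state)
{
	return state >= MOD_HDCP_DISPLAY_ACTIVE;
}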
|
||||
|
||||
|
@ -34,8 +34,7 @@ struct dc_info_packet;
|
||||
struct mod_vrr_params;
|
||||
|
||||
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
struct dc_info_packet *info_packet,
|
||||
bool *use_vsc_sdp_for_colorimetry);
|
||||
struct dc_info_packet *info_packet);
|
||||
|
||||
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
|
||||
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
|
||||
|
@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
|
||||
};
|
||||
|
||||
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
struct dc_info_packet *info_packet,
|
||||
bool *use_vsc_sdp_for_colorimetry)
|
||||
struct dc_info_packet *info_packet)
|
||||
{
|
||||
unsigned int vsc_packet_revision = vsc_packet_undefined;
|
||||
unsigned int i;
|
||||
@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
unsigned int colorimetryFormat = 0;
|
||||
bool stereo3dSupport = false;
|
||||
|
||||
/* Initialize first, later if infopacket is valid determine if VSC SDP
|
||||
* should be used to signal colorimetry format and pixel encoding.
|
||||
*/
|
||||
*use_vsc_sdp_for_colorimetry = false;
|
||||
|
||||
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
|
||||
vsc_packet_revision = vsc_packet_rev1;
|
||||
stereo3dSupport = true;
|
||||
@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
if (stream->psr_version != 0)
|
||||
vsc_packet_revision = vsc_packet_rev2;
|
||||
|
||||
/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
|
||||
if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
|
||||
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
|
||||
/* Update to revision 5 for extended colorimetry support */
|
||||
if (stream->use_vsc_sdp_for_colorimetry)
|
||||
vsc_packet_revision = vsc_packet_rev5;
|
||||
|
||||
/* VSC packet not needed based on the features
|
||||
@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
|
||||
info_packet->valid = true;
|
||||
|
||||
/* If we are using VSC SDP revision 05h, use this to signal for
|
||||
* colorimetry format and pixel encoding. HW should later be
|
||||
* programmed to set MSA MISC1 bit 6 to indicate ignore
|
||||
* colorimetry format and pixel encoding in the MSA.
|
||||
*/
|
||||
*use_vsc_sdp_for_colorimetry = true;
|
||||
|
||||
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
|
||||
* Data Bytes DB 18~16
|
||||
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
|
||||
|
@ -23,15 +23,12 @@
#include <linux/firmware.h>
#include <linux/pci.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"

@ -935,6 +932,13 @@ static int smu_sw_init(void *handle)
return ret;
}

if (adev->smu.ppt_funcs->i2c_eeprom_init) {
ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);

if (ret)
return ret;
}

return 0;
}

@ -944,6 +948,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;

if (adev->smu.ppt_funcs->i2c_eeprom_fini)
smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

kfree(smu->irq_source);
smu->irq_source = NULL;

@ -1463,21 +1470,26 @@ static int smu_disable_dpm(struct smu_context *smu)
}

/*
* For baco on Arcturus, this operation
* (disable all smu feature) will be handled by SMU FW.
* Disable all enabled SMU features.
* This should be handled in SMU FW, as a backup
* driver can issue call to SMU FW until sequence
* in SMU FW is operational.
*/
if (adev->asic_type == CHIP_ARCTURUS) {
if (use_baco && (smu_version > 0x360e00))
return 0;
}

/* Disable all enabled SMU features */
ret = smu_system_features_control(smu, false);
if (ret) {
pr_err("Failed to disable smu features.\n");
return ret;
}

/*
* Arcturus does not have BACO bit in disable feature mask.
* Enablement of BACO bit on Arcturus should be skipped.
*/
if (adev->asic_type == CHIP_ARCTURUS) {
if (use_baco && (smu_version > 0x360e00))
return 0;
}

/* For baco, need to leave BACO feature enabled */
if (use_baco) {
/*
@ -21,7 +21,6 @@
*
*/

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"

@ -42,7 +41,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5

@ -2191,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &arcturus_i2c_eeprom_i2c_algo;
snprintf(control->name, sizeof(control->name), "RAS EEPROM");
snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");

res = i2c_add_adapter(control);
if (res)
@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
pr_err("Unknown throttling event sources.");
/* fall through */
fallthrough;
case 0:
protection = false;
/* src is unused */

@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
/* fall through */
fallthrough;
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
fallthrough;
#endif
/* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
@ -21,7 +21,6 @@
*
*/

#include "pp_debug.h"
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"

@ -31,7 +30,6 @@
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "soc15_common.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"

@ -24,7 +24,6 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"

@ -26,7 +26,6 @@

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"

@ -33,6 +33,8 @@
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

#include "smu_v11_0_i2c.h"

/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000

@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
struct vega20_smumgr *priv;
unsigned long tools_size = 0x19000;
int ret = 0;
struct amdgpu_device *adev = hwmgr->adev;

struct cgs_firmware_info info = {0};

@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
if (ret)
goto err4;

return 0;

err4:

@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;

smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);

if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,

@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}

return 0;
}

@ -21,7 +21,6 @@
*
*/

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@ -1280,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID

struct edid_quirk {
u8 mfg_id[2];
u8 prod_id[2];
u32 quirks;
};

#define MFG(first, second) { (first), (second) }
#define PROD_ID(first, second) { (first), (second) }

/*
* Some devices have unreliable OUIDs where they don't set the device ID
* correctly, and as a result we need to use the EDID for finding additional
* DP quirks in such cases.
*/
static const struct edid_quirk edid_quirk_list[] = {
/* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
* only supports DPCD backlight controls
*/
{ MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
/*
* Some Dell CML 2020 systems have panels support both AUX and PWM
* backlight control, and some only support AUX backlight control. All
* said panels start up in AUX mode by default, and we don't have any
* support for disabling HDR mode on these panels which would be
* required to switch to PWM backlight control mode (plus, I'm not
* even sure we want PWM backlight controls over DPCD backlight
* controls anyway...). Until we have a better way of detecting these,
* force DPCD backlight mode on all of them.
*/
{ MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
};

#undef MFG
#undef PROD_ID

/**
* drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
* DP-specific quirks
* @edid: The EDID to check
*
* While OUIDs are meant to be used to recognize a DisplayPort device, a lot
* of manufacturers don't seem to like following standards and neglect to fill
* the dev-ID in, making it impossible to only use OUIDs for determining
* quirks in some cases. This function can be used to check the EDID and look
* up any additional DP quirks. The bits returned by this function correspond
* to the quirk bits in &drm_dp_quirk.
*
* Returns: a bitmask of quirks, if any. The driver can check this using
* drm_dp_has_quirk().
*/
u32 drm_dp_get_edid_quirks(const struct edid *edid)
{
const struct edid_quirk *quirk;
u32 quirks = 0;
int i;

if (!edid)
return 0;

for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (memcmp(quirk->mfg_id, edid->mfg_id,
sizeof(edid->mfg_id)) == 0 &&
memcmp(quirk->prod_id, edid->prod_code,
sizeof(edid->prod_code)) == 0)
quirks |= quirk->quirks;
}

DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
(int)sizeof(edid->mfg_id), edid->mfg_id,
(int)sizeof(edid->prod_code), edid->prod_code, quirks);

return quirks;
}
EXPORT_SYMBOL(drm_dp_get_edid_quirks);

/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel

@ -5490,7 +5490,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;

if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
if (drm_dp_has_quirk(&desc, 0,
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
@ -12,6 +12,7 @@

#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);

lockdep_assert_held(&gpu->lock);

@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
if (flush) {
unsigned int dwords = 7;

if (has_blt)
dwords += 10;

link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
}
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
if (gpu->exec_state == ETNA_PIPE_3D)
CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
VIVS_TS_FLUSH_CACHE_FLUSH);
if (gpu->exec_state == ETNA_PIPE_3D) {
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
} else {
CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
VIVS_TS_FLUSH_CACHE_FLUSH);
}
}
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
}
CMD_END(buffer);

etnaviv_buffer_replace_wait(buffer, waitlink_offset,
@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
bool switch_mmu_context = gpu->mmu_context != mmu_context;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);

lockdep_assert_held(&gpu->lock);

@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
* 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
return_dwords = 7;

/*
* When the BLT engine is present we need 6 more dwords in the return
* target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
* but we don't need the normal TS flush state.
*/
if (has_blt)
return_dwords += 6;

return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);

@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR);
CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
VIVS_TS_FLUSH_CACHE_FLUSH);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
} else {
CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
VIVS_TS_FLUSH_CACHE_FLUSH);
}
}
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
}

CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
@ -541,6 +541,7 @@ static int etnaviv_bind(struct device *dev)
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
if (IS_ERR(priv->cmdbuf_suballoc)) {

@ -35,6 +35,7 @@ struct etnaviv_drm_private {
int num_gpus;
struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
gfp_t shm_gfp_mask;

struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
struct etnaviv_iommu_global *mmu_global;

@ -602,6 +602,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_gem_object *obj = NULL;
int ret;

@ -624,8 +625,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
* above new_inode() why this is required _and_ expected if you're
* going to pin these pages.
*/
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

etnaviv_gem_obj_add(dev, obj);
@ -105,7 +105,7 @@ struct etnaviv_gem_submit {
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
struct etnaviv_gem_submit_bo bos[];
/* No new members here, the previous one is variable-length! */
};

@ -333,9 +333,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.revision = etnaviv_field(chipIdentity,
VIVS_HI_CHIP_IDENTITY_REVISION);
} else {
u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);

gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);

/*
* !!!! HACK ALERT !!!!

@ -350,7 +354,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)

/* Another special case */
if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

if (chipDate == 0x20080814 && chipTime == 0x12051100) {

@ -373,6 +376,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.model = chipModel_GC3000;
gpu->identity.revision &= 0xffff;
}

if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
gpu->identity.eco_id = 1;

if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
gpu->identity.eco_id = 1;
}

dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@ -506,7 +515,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* read idle register. */
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

/* try reseting again if FE it not idle */
/* try resetting again if FE is not idle */
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
dev_dbg(gpu->dev, "FE is not idle\n");
continue;

@ -772,6 +781,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}

/*
* If the GPU is part of a system with DMA addressing limitations,
* request pages for our SHM backend buffers from the DMA32 zone to
* hopefully avoid performance killing SWIOTLB bounce buffering.
*/
if (dma_addressing_limited(gpu->dev))
priv->shm_gfp_mask |= GFP_DMA32;

/* Create buffer: */
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
@ -851,6 +868,13 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)

verify_dma(gpu, &debug);

seq_puts(m, "\tidentity\n");
seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);

seq_puts(m, "\tfeatures\n");
seq_printf(m, "\t major_features: 0x%08x\n",
gpu->identity.features);

@ -930,6 +954,20 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
seq_puts(m, "\t FP is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
seq_puts(m, "\t TS is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
seq_puts(m, "\t BL is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
seq_puts(m, "\t ASYNCFE is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
seq_puts(m, "\t MC is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
seq_puts(m, "\t PPA is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
seq_puts(m, "\t WD is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
seq_puts(m, "\t NN is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
seq_puts(m, "\t TP is not idle\n");
if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
seq_puts(m, "\t AXI low power mode\n");

@ -1805,11 +1843,15 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
if (atomic_read(&gpu->sched.hw_rq_count))
return -EBUSY;

/* Check whether the hardware (except FE) is idle */
mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
/* Check whether the hardware (except FE and MC) is idle */
mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
VIVS_HI_IDLE_STATE_MC);
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
if (idle != mask)
if (idle != mask) {
dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
idle);
return -EBUSY;
}

return etnaviv_gpu_hw_suspend(gpu);
}
@ -15,11 +15,11 @@ struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
/* Chip model. */
u32 model;

/* Revision value.*/
u32 revision;
u32 product_id;
u32 customer_id;
u32 eco_id;

/* Supported feature fields. */
u32 features;

@ -6,9 +6,43 @@
#include "etnaviv_gpu.h"

static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
{
.model = 0x400,
.revision = 0x4652,
.product_id = 0x70001,
.customer_id = 0x100,
.eco_id = 0,
.stream_count = 4,
.register_max = 64,
.thread_count = 128,
.shader_core_count = 1,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
.instruction_count = 256,
.num_constants = 320,
.buffer_size = 0,
.varyings_count = 8,
.features = 0xa0e9e004,
.minor_features0 = 0xe1299fff,
.minor_features1 = 0xbe13b219,
.minor_features2 = 0xce110010,
.minor_features3 = 0x8000001,
.minor_features4 = 0x20102,
.minor_features5 = 0x120000,
.minor_features6 = 0x0,
.minor_features7 = 0x0,
.minor_features8 = 0x0,
.minor_features9 = 0x0,
.minor_features10 = 0x0,
.minor_features11 = 0x0,
},
{
.model = 0x7000,
.revision = 0x6214,
.product_id = ~0U,
.customer_id = ~0U,
.eco_id = ~0U,
.stream_count = 16,
.register_max = 64,
.thread_count = 1024,
@ -43,7 +77,13 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)

for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
if (etnaviv_chip_identities[i].model == ident->model &&
etnaviv_chip_identities[i].revision == ident->revision) {
etnaviv_chip_identities[i].revision == ident->revision &&
(etnaviv_chip_identities[i].product_id == ident->product_id ||
etnaviv_chip_identities[i].product_id == ~0U) &&
(etnaviv_chip_identities[i].customer_id == ident->customer_id ||
etnaviv_chip_identities[i].customer_id == ~0U) &&
(etnaviv_chip_identities[i].eco_id == ident->eco_id ||
etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
return true;

@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
};

struct etnaviv_pm_domain_meta {
unsigned int feature;
const struct etnaviv_pm_domain *domains;
u32 nr_domains;
};
@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {

static const struct etnaviv_pm_domain_meta doms_meta[] = {
{
.feature = chipFeatures_PIPE_3D,
.nr_domains = ARRAY_SIZE(doms_3d),
.domains = &doms_3d[0]
},
{
.feature = chipFeatures_PIPE_2D,
.nr_domains = ARRAY_SIZE(doms_2d),
.domains = &doms_2d[0]
},
{
.feature = chipFeatures_PIPE_VG,
.nr_domains = ARRAY_SIZE(doms_vg),
.domains = &doms_vg[0]
}
};

static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
{
unsigned int num = 0, i;

for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];

if (gpu->identity.features & meta->feature)
num += meta->nr_domains;
}

return num;
}

static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
unsigned int index)
{
const struct etnaviv_pm_domain *domain = NULL;
unsigned int offset = 0, i;

for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];

if (!(gpu->identity.features & meta->feature))
continue;

if (meta->nr_domains < (index - offset)) {
offset += meta->nr_domains;
continue;
}

domain = meta->domains + (index - offset);
}

return domain;
}

int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain)
{
const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;

if (domain->iter >= meta->nr_domains)
if (domain->iter >= nr_domains)
return -EINVAL;

dom = meta->domains + domain->iter;
dom = pm_domain(gpu, domain->iter);
if (!dom)
return -EINVAL;

domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
strncpy(domain->name, dom->name, sizeof(domain->name));

domain->iter++;
if (domain->iter == meta->nr_domains)
if (domain->iter == nr_domains)
domain->iter = 0xff;

return 0;

@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal)
{
const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;

if (signal->domain >= meta->nr_domains)
if (signal->domain >= nr_domains)
return -EINVAL;

dom = meta->domains + signal->domain;
dom = pm_domain(gpu, signal->domain);
if (!dom)
return -EINVAL;

if (signal->iter >= dom->nr_signals)
return -EINVAL;
@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE.

/* This is a cut-down version of the state_blt.xml.h file */

#define VIVS_BLT_SET_COMMAND 0x000140ac

#define VIVS_BLT_ENABLE 0x000140b8
#define VIVS_BLT_ENABLE_ENABLE 0x00000001

@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng

The rules-ng-ng source files this header was generated from are:
- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)

Copyright (C) 2012-2018 by the following authors:
Copyright (C) 2012-2019 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@ -48,6 +48,9 @@ DEALINGS IN THE SOFTWARE.
#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
#define MMU_EXCEPTION_OUT_OF_BOUND 0x00000004
#define MMU_EXCEPTION_READ_SECURITY_VIOLATION 0x00000005
#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION 0x00000006

#define VIVS_HI 0x00000000

#define VIVS_HI_CLOCK_CONTROL 0x00000000

@ -81,6 +84,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_IDLE_STATE_IM 0x00000200
#define VIVS_HI_IDLE_STATE_FP 0x00000400
#define VIVS_HI_IDLE_STATE_TS 0x00000800
#define VIVS_HI_IDLE_STATE_BL 0x00001000
#define VIVS_HI_IDLE_STATE_ASYNCFE 0x00002000
#define VIVS_HI_IDLE_STATE_MC 0x00004000
#define VIVS_HI_IDLE_STATE_PPA 0x00008000
#define VIVS_HI_IDLE_STATE_WD 0x00010000
#define VIVS_HI_IDLE_STATE_NN 0x00020000
#define VIVS_HI_IDLE_STATE_TP 0x00040000
#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000

#define VIVS_HI_AXI_CONFIG 0x00000008

@ -140,6 +150,8 @@ DEALINGS IN THE SOFTWARE.

#define VIVS_HI_CHIP_TIME 0x0000002c

#define VIVS_HI_CHIP_CUSTOMER_ID 0x00000030

#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034

#define VIVS_HI_CACHE_CONTROL 0x00000038

@ -237,6 +249,8 @@ DEALINGS IN THE SOFTWARE.

#define VIVS_HI_BLT_INTR 0x000000d4

#define VIVS_HI_CHIP_ECO_ID 0x000000e8

#define VIVS_HI_AUXBIT 0x000000ec

#define VIVS_PM 0x00000000
Some files were not shown because too many files have changed in this diff.