drm/amdgpu: skip access sdma_v5_0 registers under SRIOV (v2)
Due to the new L1 policy, many SDMA registers are blocked under SRIOV, which raises violation warnings when they are accessed. In total there are 6 register pairs that need to be skipped when the driver inits and de-inits:

mmSDMA0/1_CNTL
mmSDMA0/1_F32_CNTL
mmSDMA0/1_UTCL1_PAGE
mmSDMA0/1_UTCL1_CNTL
mmSDMA0/1_CHICKEN_BITS
mmSDMA0/1_SEM_WAIT_FAIL_TIMER_CNTL

v2: squash in warning fix

Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 17e137f27c (parent 1675c3a24d)
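The pattern applied throughout the diff below is a single guard: every read or write of a blocked register is wrapped in an amdgpu_sriov_vf() check so a virtual function never issues the access, with an early return where everything after the check only touches blocked registers, as in sdma_v5_0_enable(). A minimal standalone sketch of the idiom; the stub types, is_sriov_vf(), wreg32() and the register offset are illustrative stand-ins, not the driver's real definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub standing in for struct amdgpu_device; only the flag we need here. */
struct dev { bool sriov_vf; };

static bool is_sriov_vf(const struct dev *d) { return d->sriov_vf; }

/* Stub MMIO write; the real driver pokes a mapped register BAR. */
static void wreg32(uint32_t reg, uint32_t val)
{
	printf("WREG32(0x%04x) = 0x%08x\n", reg, val);
}

#define mmSDMA0_CNTL 0x001c /* illustrative offset, not the real one */

static void program_cntl(struct dev *d, uint32_t val)
{
	/* On a VF this register is blocked by the host's L1 policy;
	 * touching it only raises a violation warning, so skip it. */
	if (is_sriov_vf(d))
		return;
	wreg32(mmSDMA0_CNTL, val);
}

int main(void)
{
	struct dev bare = { .sriov_vf = false }, vf = { .sriov_vf = true };

	program_cntl(&bare, 0x1); /* bare metal: the write goes through */
	program_cntl(&vf, 0x1);   /* SRIOV VF: silently skipped */
	return 0;
}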
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
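For reference, each entry carries (IP block, instance, register, and-mask, or-mask), and soc15_program_register_sequence() is understood to apply it as a read-modify-write: bits selected by the and-mask are rewritten from the or-mask, bits outside it keep their current value. The standalone sketch below recomputes one SRIOV entry under that assumption; apply_golden() paraphrases the soc15.c logic rather than copying it:

#include <stdint.h>
#include <stdio.h>

/* Paraphrase of the golden-register read-modify-write: bits set in
 * and_mask are replaced from or_mask, bits clear in and_mask survive. */
static uint32_t apply_golden(uint32_t cur, uint32_t and_mask, uint32_t or_mask)
{
	return (cur & ~and_mask) | (or_mask & and_mask);
}

int main(void)
{
	/* mmSDMA0_GFX_RB_WPTR_POLL_CNTL entry above: and_mask 0xfffffff7
	 * rewrites every bit except bit 3, which keeps its current value. */
	uint32_t cur = 0xdeadbeef; /* arbitrary prior register content */
	uint32_t out = apply_golden(cur, 0xfffffff7, 0x00403000);

	printf("0x%08x -> 0x%08x\n", cur, out); /* -> 0x00403008 */
	return 0;
}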
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
 		break;
 	case CHIP_NAVI12:
-		soc15_program_register_sequence(adev,
-						golden_settings_sdma_5,
-						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
+		if (amdgpu_sriov_vf(adev))
+			soc15_program_register_sequence(adev,
+							golden_settings_sdma_5_sriov,
+							(const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+		else
+			soc15_program_register_sequence(adev,
+							golden_settings_sdma_5,
+							(const u32)ARRAY_SIZE(golden_settings_sdma_5));
 		soc15_program_register_sequence(adev,
 						golden_settings_sdma_nv12,
 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -526,7 +554,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-	u32 f32_cntl, phase_quantum = 0;
+	u32 f32_cntl = 0, phase_quantum = 0;
 	int i;
 
 	if (amdgpu_sdma_phase_quantum) {
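This hunk is the v2 "squash in warning fix": once the read of mmSDMA0_CNTL moves under its own SRIOV check (next hunk) while the write sits under a second check, the compiler can no longer prove f32_cntl is initialized before use, so zero-initializing it silences the maybe-uninitialized warning. A translation-unit sketch of the shape that provokes it; cond(), rreg() and wreg() are hypothetical externs, left undefined on purpose so the compiler cannot see that both branches test the same condition:

#include <stdint.h>

extern int cond(void);
extern uint32_t rreg(void);
extern void wreg(uint32_t v);

void demo(void)
{
	uint32_t v; /* without "= 0", -Wmaybe-uninitialized can fire */

	if (!cond())
		v = rreg(); /* initialized only on this path */

	if (!cond())
		wreg(v); /* the two cond() calls may differ, says the analyzer */
}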
@@ -554,9 +582,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+		if (!amdgpu_sriov_vf(adev)) {
+			f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+		}
+
 		if (enable && amdgpu_sdma_phase_quantum) {
 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
 			       phase_quantum);
@@ -565,7 +596,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
 			       phase_quantum);
 		}
-		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		if (!amdgpu_sriov_vf(adev))
+			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
 	}
 
 }
@@ -588,6 +620,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v5_0_rlc_stop(adev);
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -620,7 +655,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
-		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+		if (!amdgpu_sriov_vf(adev))
+			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
 
 		/* Set ring buffer size in dwords */
 		rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -696,26 +732,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		/* set minor_ptr_update to 0 after wptr programed */
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
-		/* set utc l1 enable flag always to 1 */
-		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+		if (!amdgpu_sriov_vf(adev)) {
+			/* set utc l1 enable flag always to 1 */
+			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+			temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
 
-		/* enable MCBP */
-		temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
-		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+			/* enable MCBP */
+			temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
 
-		/* Set up RESP_MODE to non-copy addresses */
-		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
-		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
-		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
-		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+			/* Set up RESP_MODE to non-copy addresses */
+			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
 
-		/* program default cache read and write policy */
-		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
-		/* clean read policy and write policy bits */
-		temp &= 0xFF0FFF;
-		temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
-		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+			/* program default cache read and write policy */
+			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+			/* clean read policy and write policy bits */
+			temp &= 0xFF0FFF;
+			temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+		}
 
 		if (!amdgpu_sriov_vf(adev)) {
 			/* unhalt engine */
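One detail worth unpacking in the block that just moved under the guard: temp &= 0xFF0FFF keeps bits 0..11 and 16..23 and clears the two 2-bit cache-policy fields at bits 12..13 (read) and 14..15 (write), which the OR then refills; as written, the 24-bit constant also clears bits 24..31. A standalone recomputation, with the two POLICY values as illustrative placeholders for CACHE_READ/WRITE_POLICY_L2__DEFAULT, whose real definitions live elsewhere in the driver:

#include <stdint.h>
#include <stdio.h>

#define READ_POLICY  0u /* placeholder for CACHE_READ_POLICY_L2__DEFAULT */
#define WRITE_POLICY 0u /* placeholder for CACHE_WRITE_POLICY_L2__DEFAULT */

int main(void)
{
	uint32_t temp = 0xffffffffu; /* pretend every bit was set */

	temp &= 0xFF0FFF; /* clear bits 12..15 (and, incidentally, the top byte) */
	temp |= (READ_POLICY << 12) | (WRITE_POLICY << 14);

	printf("0x%08x\n", temp); /* -> 0x00ff0fff */
	return 0;
}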
@@ -1385,14 +1423,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
 {
 	u32 sdma_cntl;
 
-	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
-		sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
-		sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+	if (!amdgpu_sriov_vf(adev)) {
+		u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+			sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+			sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
 
-	sdma_cntl = RREG32(reg_offset);
-	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
-		state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
-	WREG32(reg_offset, sdma_cntl);
+		sdma_cntl = RREG32(reg_offset);
+		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+			state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+		WREG32(reg_offset, sdma_cntl);
+	}
 
 	return 0;
 }