drm/amdgpu/vcn2.5: Add firmware w/r ptr reset sync
Add firmware write/read pointer reset sync through shared memory.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c97e3076eb
parent 9352141027
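The mechanism in the diff below is a small handshake: before the driver reprograms a ring's read/write pointer registers it sets a per-queue FW_QUEUE_RING_RESET flag in the firmware-shared structure (fw_shared->multi_queue.*_queue_mode), and it clears the flag once the new pointers are written, so the firmware knows a deliberate pointer reset is in progress and can resynchronize. The standalone C program below is only a minimal sketch of that flow; the struct layout, the flag value, and the ring_reset_begin/ring_reset_end helpers are illustrative assumptions, not the amdgpu definitions.

/*
 * Minimal user-space model of the shared-memory reset handshake.
 * Everything here is a simplified stand-in for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_QUEUE_RING_RESET (1u << 0)   /* assumed bit value for this sketch */

/* cut-down stand-in for the multi_queue block of struct amdgpu_fw_shared */
struct fw_shared_multi_queue {
        volatile uint32_t decode_queue_mode;
        volatile uint32_t encode_generalpurpose_queue_mode;
        volatile uint32_t encode_lowlatency_queue_mode;
};

/* hypothetical helpers that bracket a ring-pointer reset */
static void ring_reset_begin(volatile uint32_t *queue_mode)
{
        *queue_mode |= FW_QUEUE_RING_RESET;     /* tell firmware a reset is in progress */
}

static void ring_reset_end(volatile uint32_t *queue_mode)
{
        *queue_mode &= ~FW_QUEUE_RING_RESET;    /* pointers are consistent again */
}

int main(void)
{
        struct fw_shared_multi_queue mq = { 0 };
        uint32_t rptr = 0, wptr = 0;            /* stand-ins for mmUVD_RBC_RB_RPTR/WPTR */

        ring_reset_begin(&mq.decode_queue_mode);
        wptr = rptr;                            /* reprogram the write pointer */
        ring_reset_end(&mq.decode_queue_mode);

        printf("decode_queue_mode=0x%x wptr=%u\n",
               (unsigned int)mq.decode_queue_mode, (unsigned int)wptr);
        return 0;
}

In the patch each queue (decode, encode general-purpose, encode low-latency) is bracketed separately, so the firmware can resync one ring's pointers without disturbing the others.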
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
                 return r;
 
         for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+                volatile struct amdgpu_fw_shared *fw_shared;
+
                 if (adev->vcn.harvest_config & (1 << j))
                         continue;
                 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -207,6 +209,9 @@ static int vcn_v2_5_sw_init(void *handle)
                         if (r)
                                 return r;
                 }
+
+                fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+                fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
         }
 
         if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +235,16 @@ static int vcn_v2_5_sw_init(void *handle)
  */
 static int vcn_v2_5_sw_fini(void *handle)
 {
-        int r;
+        int i, r;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        volatile struct amdgpu_fw_shared *fw_shared;
+
+        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+                if (adev->vcn.harvest_config & (1 << i))
+                        continue;
+                fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+                fw_shared->present_flag_0 = 0;
+        }
 
         if (amdgpu_sriov_vf(adev))
                 amdgpu_virt_free_mm_table(adev);
@@ -415,6 +428,15 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
                         upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+                /* non-cache window */
+                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+                        lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+                        upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+                WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+                WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_SIZE0,
+                        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
         }
 }
 
@@ -498,13 +520,16 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 
         /* non-cache window */
         WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+                lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
         WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+                upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
         WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
                 UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
         WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-                UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+                UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+                AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
         /* VCN global tiling registers */
         WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
@@ -741,6 +766,7 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
 
 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
+        volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
         struct amdgpu_ring *ring;
         uint32_t rb_bufsz, tmp;
 
@@ -850,7 +876,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
         WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
                 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
                 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
-        fw_shared->multi_queue.decode_queue_mode |= fw_queue_ring_reset;
+        fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
 
         /* set the write pointer delay */
         WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -874,6 +900,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
         WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
                 lower_32_bits(ring->wptr));
 
+        fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
         /* Unstall DPG */
         WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
                 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -963,6 +990,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
         vcn_v2_5_mc_resume(adev);
 
         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+                volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
                 if (adev->vcn.harvest_config & (1 << i))
                         continue;
                 /* VCN global tiling registers */
@@ -1036,6 +1064,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
                 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
 
+                fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                 /* programm the RB_BASE for ring buffer */
                 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                         lower_32_bits(ring->gpu_addr));
@@ -1048,19 +1077,25 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                 ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
                 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
                         lower_32_bits(ring->wptr));
+                fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                 ring = &adev->vcn.inst[i].ring_enc[0];
                 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+                fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                 ring = &adev->vcn.inst[i].ring_enc[1];
                 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+                fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
         }
 
         return 0;
@@ -1381,6 +1416,8 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
                         if (!ret_code) {
+                                volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
                                 /* pause DPG */
                                 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
@@ -1396,6 +1433,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
                                         ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
                                 /* Restore */
+                                fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
                                 ring->wptr = 0;
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
@@ -1403,7 +1441,9 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                                fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                                fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
                                 ring->wptr = 0;
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -1411,9 +1451,12 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                                fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+                                fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                                 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
                                         RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+                                fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 
                                 /* Unstall DPG */
                                 WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),