drm/amdgpu: stop disabling the scheduler during HW fini
When we stop the HW, for example for a GPU reset, we should not stop the front-end scheduler. Otherwise we run into intermittent failures during command submission.

The scheduler should only be stopped in very few cases:
1. We can't get the hardware working in the ring or IB test after a GPU reset.
2. The KIQ scheduler is not used in the front-end and should be disabled during GPU reset.
3. In amdgpu_ring_fini() when the driver unloads.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Nirmoy Das <nirmoy.das@amd.com>
Tested-by: Dennis Li <dennis.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 1675c3a24d
parent 6b6706cdac
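All of the hunks below revolve around one flag: amdgpu_ring.sched.ready gates whether the front-end will accept command submissions for a ring, so clearing it in every HW-stop path makes submissions fail spuriously while a reset is in flight. The following is a minimal, self-contained sketch of that gating pattern — userspace C with stand-in types, not the real driver structures, purely to illustrate the before/after behavior:

/* Illustrative sketch only; names mirror the driver but nothing here
 * is kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	const char *name;
	bool ready;		/* stands in for amdgpu_ring.sched.ready */
};

/* Front-end submission path: bails out when the scheduler is not ready. */
static int submit(struct ring *ring)
{
	if (!ring->ready) {
		printf("%s: submission rejected (scheduler not ready)\n",
		       ring->name);
		return -1;	/* the intermittent CS failure the patch fixes */
	}
	printf("%s: job queued\n", ring->name);
	return 0;
}

/* Old behaviour: stopping the HW block also tore down the front-end. */
static void hw_stop_old(struct ring *ring)
{
	/* halt the hardware block here (register writes omitted) */
	ring->ready = false;
}

/* New behaviour: HW stop leaves sched.ready alone; only a failed
 * ring/IB test after reset, the KIQ, or amdgpu_ring_fini() clear it. */
static void hw_stop_new(struct ring *ring)
{
	/* halt the hardware block here; ring->ready stays untouched */
	(void)ring;
}

int main(void)
{
	struct ring gfx = { "gfx", true };

	hw_stop_old(&gfx);
	submit(&gfx);	/* fails: old code cleared ready during HW stop */

	gfx.ready = true;
	hw_stop_new(&gfx);
	submit(&gfx);	/* succeeds: a reset no longer blocks the front-end */
	return 0;
}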
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 	}
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -2442,10 +2442,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-	if (!enable) {
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
-	}
 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
@@ -2924,16 +2920,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
 	if (enable) {
 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
 	} else {
 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
 			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
 			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
 		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1951,7 +1951,6 @@ err1:
 
 static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
 	if (enable) {
 		WREG32(mmCP_ME_CNTL, 0);
 	} else {
@@ -1959,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 				      CP_ME_CNTL__PFP_HALT_MASK |
 				      CP_ME_CNTL__CE_HALT_MASK));
 		WREG32(mmSCRATCH_UMSK, 0);
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2432,15 +2432,12 @@ err1:
  */
 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
-	if (enable) {
+	if (enable)
 		WREG32(mmCP_ME_CNTL, 0);
-	} else {
-		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
-	}
+	else
+		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+				      CP_ME_CNTL__PFP_HALT_MASK |
+				      CP_ME_CNTL__CE_HALT_MASK));
 	udelay(50);
 }
 
@@ -2701,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
  */
 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
-	if (enable) {
+	if (enable)
 		WREG32(mmCP_MEC_CNTL, 0);
-	} else {
-		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
-	}
+	else
+		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 	udelay(50);
 }
 
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4122,7 +4122,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
 	u32 tmp = RREG32(mmCP_ME_CNTL);
 
 	if (enable) {
@@ -4133,8 +4132,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32(mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -4322,14 +4319,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
 	if (enable) {
 		WREG32(mmCP_MEC_CNTL, 0);
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
 		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3095,16 +3095,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
 
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
-	if (!enable) {
-		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].sched.ready = false;
-	}
 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
 	udelay(50);
 }
@@ -3300,15 +3295,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
 	if (enable) {
 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
 	} else {
 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
 		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -169,14 +169,11 @@ static int jpeg_v2_0_hw_init(void *handle)
 static int jpeg_v2_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
 
 	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
 	      RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
 		jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-	ring->sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -937,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-		sdma[i]->sched.ready = false;
 	}
 }
 
@@ -985,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
 					IB_ENABLE, 0);
 		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-		sdma[i]->sched.ready = false;
 	}
 }
 
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -502,9 +502,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 	}
-
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-		ring->sched.ready = false;
 	}
 }
 
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -210,13 +210,10 @@ done:
 static int uvd_v4_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v4_2_stop(adev);
 
-	ring->sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -208,13 +208,10 @@ done:
 static int uvd_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v5_0_stop(adev);
 
-	ring->sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -537,13 +537,10 @@ done:
 static int uvd_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v6_0_stop(adev);
 
-	ring->sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -600,7 +600,6 @@ done:
 static int uvd_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i;
 
 	if (!amdgpu_sriov_vf(adev))
 		uvd_v7_0_stop(adev);
@@ -609,12 +608,6 @@ static int uvd_v7_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
-		if (adev->uvd.harvest_config & (1 << i))
-			continue;
-		adev->uvd.inst[i].ring.sched.ready = false;
-	}
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -539,7 +539,6 @@ static int vce_v4_0_hw_init(void *handle)
 static int vce_v4_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i;
 
 	if (!amdgpu_sriov_vf(adev)) {
 		/* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +548,6 @@ static int vce_v4_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -227,14 +227,11 @@ done:
 static int vcn_v1_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-	ring->sched.ready = false;
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -252,21 +252,12 @@ done:
 static int vcn_v2_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
-	int i;
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
 	      RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
 		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-	ring->sched.ready = false;
-
-	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		ring = &adev->vcn.inst->ring_enc[i];
-		ring->sched.ready = false;
-	}
-
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -308,25 +308,16 @@ done:
 static int vcn_v2_5_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring;
-	int i, j;
+	int i;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		ring = &adev->vcn.inst[i].ring_dec;
 
 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
 		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
 			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-		ring->sched.ready = false;
-
-		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-			ring = &adev->vcn.inst[i].ring_enc[j];
-			ring->sched.ready = false;
-		}
 	}
 
 	return 0;