drm/amdgpu: reorganize sdma v4 code to support more instances
This change is needed for Arcturus, which has 8 SDMA instances. The CG/PG part is not covered for now. Signed-off-by: Le Ma <le.ma@amd.com> Acked-by: Snow Zhang <Snow.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
51c608984f
commit
667a48226e
@ -25,11 +25,17 @@
|
||||
#define __AMDGPU_SDMA_H__
|
||||
|
||||
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		8

/*
 * One trap-IRQ source identifier per SDMA engine instance.
 * The values are sequential from 0, so instance i maps to
 * AMDGPU_SDMA_IRQ_INSTANCE0 + i; AMDGPU_SDMA_IRQ_LAST is the count.
 */
enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
	AMDGPU_SDMA_IRQ_INSTANCE1,
	AMDGPU_SDMA_IRQ_INSTANCE2,
	AMDGPU_SDMA_IRQ_INSTANCE3,
	AMDGPU_SDMA_IRQ_INSTANCE4,
	AMDGPU_SDMA_IRQ_INSTANCE5,
	AMDGPU_SDMA_IRQ_INSTANCE6,
	AMDGPU_SDMA_IRQ_INSTANCE7,
	AMDGPU_SDMA_IRQ_LAST
};
|
||||
|
||||
|
@ -205,8 +205,77 @@ static const struct soc15_reg_golden golden_settings_sdma_rv2[] =
|
||||
/*
 * sdma_v4_0_get_reg_offset - compute the absolute offset of an SDMA register
 * @adev: amdgpu device pointer
 * @instance: SDMA engine instance (0..7)
 * @offset: register offset relative to the instance's register block
 *
 * Adds @offset to the per-instance HWIP base from adev->reg_offset.
 * Returns 0 for an out-of-range instance, matching the original
 * default case.
 */
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{
	/* HWIP index for each SDMA engine, in instance order */
	static const u32 sdma_hwip[] = {
		SDMA0_HWIP, SDMA1_HWIP, SDMA2_HWIP, SDMA3_HWIP,
		SDMA4_HWIP, SDMA5_HWIP, SDMA6_HWIP, SDMA7_HWIP,
	};

	if (instance >= ARRAY_SIZE(sdma_hwip))
		return 0;

	return adev->reg_offset[sdma_hwip[instance]][0][0] + offset;
}
|
||||
|
||||
static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
|
||||
{
|
||||
switch (seq_num) {
|
||||
case 0:
|
||||
return SOC15_IH_CLIENTID_SDMA0;
|
||||
case 1:
|
||||
return SOC15_IH_CLIENTID_SDMA1;
|
||||
case 2:
|
||||
return SOC15_IH_CLIENTID_SDMA2;
|
||||
case 3:
|
||||
return SOC15_IH_CLIENTID_SDMA3;
|
||||
case 4:
|
||||
return SOC15_IH_CLIENTID_SDMA4;
|
||||
case 5:
|
||||
return SOC15_IH_CLIENTID_SDMA5;
|
||||
case 6:
|
||||
return SOC15_IH_CLIENTID_SDMA6;
|
||||
case 7:
|
||||
return SOC15_IH_CLIENTID_SDMA7;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
|
||||
{
|
||||
switch (client_id) {
|
||||
case SOC15_IH_CLIENTID_SDMA0:
|
||||
return 0;
|
||||
case SOC15_IH_CLIENTID_SDMA1:
|
||||
return 1;
|
||||
case SOC15_IH_CLIENTID_SDMA2:
|
||||
return 2;
|
||||
case SOC15_IH_CLIENTID_SDMA3:
|
||||
return 3;
|
||||
case SOC15_IH_CLIENTID_SDMA4:
|
||||
return 4;
|
||||
case SOC15_IH_CLIENTID_SDMA5:
|
||||
return 5;
|
||||
case SOC15_IH_CLIENTID_SDMA6:
|
||||
return 6;
|
||||
case SOC15_IH_CLIENTID_SDMA7:
|
||||
return 7;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
|
||||
@ -308,7 +377,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
|
||||
if (i == 0)
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
|
||||
else
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
|
||||
err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
|
||||
if (err)
|
||||
goto out;
|
||||
@ -620,26 +689,27 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
|
||||
*/
|
||||
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
|
||||
struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
|
||||
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
|
||||
u32 rb_cntl, ib_cntl;
|
||||
int i;
|
||||
|
||||
if ((adev->mman.buffer_funcs_ring == sdma0) ||
|
||||
(adev->mman.buffer_funcs_ring == sdma1))
|
||||
amdgpu_ttm_set_buffer_funcs_status(adev, false);
|
||||
int i, unset = 0;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sdma[i] = &adev->sdma.instance[i].ring;
|
||||
|
||||
if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
|
||||
amdgpu_ttm_set_buffer_funcs_status(adev, false);
|
||||
unset = 1;
|
||||
}
|
||||
|
||||
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
|
||||
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
|
||||
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
|
||||
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
|
||||
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
|
||||
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
|
||||
}
|
||||
|
||||
sdma0->sched.ready = false;
|
||||
sdma1->sched.ready = false;
|
||||
sdma[i]->sched.ready = false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1606,16 +1676,13 @@ static int sdma_v4_0_sw_init(void *handle)
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/* SDMA trap event */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
|
||||
&adev->sdma.trap_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA trap event */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
|
||||
&adev->sdma.trap_irq);
|
||||
if (r)
|
||||
return r;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
|
||||
SDMA0_4_0__SRCID__SDMA_TRAP,
|
||||
&adev->sdma.trap_irq);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* SDMA SRAM ECC event */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
|
||||
@ -1641,11 +1708,8 @@ static int sdma_v4_0_sw_init(void *handle)
|
||||
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
|
||||
|
||||
sprintf(ring->name, "sdma%d", i);
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -1776,15 +1840,17 @@ static bool sdma_v4_0_is_idle(void *handle)
|
||||
|
||||
static int sdma_v4_0_wait_for_idle(void *handle)
|
||||
{
|
||||
unsigned i;
|
||||
u32 sdma0, sdma1;
|
||||
unsigned i, j;
|
||||
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
|
||||
sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
|
||||
|
||||
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
|
||||
for (j = 0; j < adev->sdma.num_instances; j++) {
|
||||
sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
|
||||
if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
|
||||
break;
|
||||
}
|
||||
if (j == adev->sdma.num_instances)
|
||||
return 0;
|
||||
udelay(1);
|
||||
}
|
||||
@ -1820,17 +1886,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
|
||||
uint32_t instance;
|
||||
|
||||
DRM_DEBUG("IH: SDMA trap\n");
|
||||
switch (entry->client_id) {
|
||||
case SOC15_IH_CLIENTID_SDMA0:
|
||||
instance = 0;
|
||||
break;
|
||||
case SOC15_IH_CLIENTID_SDMA1:
|
||||
instance = 1;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
|
||||
switch (entry->ring_id) {
|
||||
case 0:
|
||||
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
|
||||
|
Loading…
Reference in New Issue
Block a user