From 63fbf42f7307a5911237fed3285e669d9d4d0d1a Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Fri, 15 Jul 2016 11:19:20 +0800
Subject: [PATCH 001/171] drm/amdgpu: add check_soft_reset ip func
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It is used to identify whether an IP block is hung.

Signed-off-by: Chunming Zhou
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23 ++++++++++++++++++++++
 drivers/gpu/drm/amd/include/amd_shared.h   |  2 ++
 3 files changed, 26 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..a21b342ab2a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1956,6 +1956,7 @@ struct amdgpu_ip_block_status {
 	bool valid;
 	bool sw;
 	bool hw;
+	bool hang;
 };
 
 struct amdgpu_device {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab2458e50..fd7698a56f3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1937,6 +1937,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	return 0;
 }
 
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+	int i;
+	bool asic_hang = false;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].funcs->check_soft_reset)
+			adev->ip_blocks[i].funcs->check_soft_reset(adev);
+		if (adev->ip_block_status[i].hang) {
+			DRM_INFO("IP block:%d is hang!\n", i);
+			asic_hang = true;
+		}
+	}
+	return asic_hang;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -1950,6 +1968,11 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	int i, r;
 	int resched;
 
+	if (!amdgpu_check_soft_reset(adev)) {
+		DRM_INFO("No hardware hang detected. 
Did some blocks stall?\n"); + return 0; + } + atomic_inc(&adev->gpu_reset_counter); /* block TTM */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index a74a0d2ff1ca..fe4aa09252ab 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -159,6 +159,8 @@ struct amd_ip_funcs { bool (*is_idle)(void *handle); /* poll for idle */ int (*wait_for_idle)(void *handle); + /* check soft reset the IP block */ + int (*check_soft_reset)(void *handle); /* soft reset the IP block */ int (*soft_reset)(void *handle); /* enable/disable cg for the IP block */ From 3d7c63849072747eaba6b5d35671bd9cd2d002c1 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 15 Jul 2016 11:28:30 +0800 Subject: [PATCH 002/171] drm/amdgpu: implement gfx8 check_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 145 +++++++++++++++----------- 2 files changed, 88 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a21b342ab2a3..b6e8e7a825f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1195,6 +1195,10 @@ struct amdgpu_gfx { unsigned ce_ram_size; struct amdgpu_cu_info cu_info; const struct amdgpu_gfx_funcs *funcs; + + /* reset mask */ + uint32_t grbm_soft_reset; + uint32_t srbm_soft_reset; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bff8668e9e6d..87232397dbd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5047,11 +5047,11 @@ static int gfx_v8_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_check_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5060,16 +5060,12 @@ static int gfx_v8_0_soft_reset(void *handle) GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK | + GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { - grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, - GRBM_SOFT_RESET, SOFT_RESET_CP, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); } @@ -5080,73 +5076,99 @@ static int gfx_v8_0_soft_reset(void *handle) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 
GRBM_SOFT_RESET, + SOFT_RESET_CPF, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPC, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPG, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_GRBM, 1); + } + /* SRBM_STATUS */ tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - /* stop the rlc */ - gfx_v8_0_rlc_stop(adev); + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; + adev->gfx.grbm_soft_reset = grbm_soft_reset; + adev->gfx.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; + adev->gfx.grbm_soft_reset = 0; + adev->gfx.srbm_soft_reset = 0; + } - /* Disable GFX parsing/prefetching */ - gfx_v8_0_cp_gfx_enable(adev, false); + return 0; +} - /* Disable MEC parsing/prefetching */ - gfx_v8_0_cp_compute_enable(adev, false); +static int gfx_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 1); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 1); - WREG32(mmGMCON_DEBUG, tmp); + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; - udelay(50); - } + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } - - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 0); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 0); - WREG32(mmGMCON_DEBUG, tmp); - } - - /* Wait a little for things to settle down */ + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1); + WREG32(mmGMCON_DEBUG, tmp); udelay(50); } + + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + 
WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+	}
+
+	if (grbm_soft_reset || srbm_soft_reset) {
+		tmp = RREG32(mmGMCON_DEBUG);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+		WREG32(mmGMCON_DEBUG, tmp);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
 	return 0;
 }
 
@@ -6334,6 +6356,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.resume = gfx_v8_0_resume,
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
+	.check_soft_reset = gfx_v8_0_check_soft_reset,
 	.soft_reset = gfx_v8_0_soft_reset,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,

From d31a501ead7fb21b85c6f34a9dd79b07c6417711 Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Mon, 18 Jul 2016 10:04:34 +0800
Subject: [PATCH 003/171] drm/amdgpu: add pre_soft_reset ip func
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It will be used before soft_reset to do the preparatory work for the
reset.

Signed-off-by: Chunming Zhou
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +++++++++++++++++
 drivers/gpu/drm/amd/include/amd_shared.h   |  2 ++
 2 files changed, 19 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fd7698a56f3b..b88620586c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1955,6 +1955,23 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	return asic_hang;
 }
 
+int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index fe4aa09252ab..2a2a5aa39b99 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -161,6 +161,8 @@ struct amd_ip_funcs {
 	int (*wait_for_idle)(void *handle);
 	/* check soft reset the IP block */
 	int (*check_soft_reset)(void *handle);
+	/* pre soft reset the IP block */
+	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
 	int (*soft_reset)(void *handle);
 	/* enable/disable cg for the IP block */

From 1057f20c2252c5eba17e4a9bbf74ea990411f46c Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Fri, 15 Jul 2016 15:18:25 +0800
Subject: [PATCH 004/171] drm/amdgpu: add gfx8 pre soft reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Chunming Zhou
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 58 +++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 87232397dbd0..62ba7e550aee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5111,6 +5111,63 @@ static int gfx_v8_0_check_soft_reset(void *handle)
 	return 0;
 }
 
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int i;
+
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+		u32 tmp;
+		tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+				    DEQUEUE_REQ, 2);
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	/* stop the rlc */
+	gfx_v8_0_rlc_stop(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+		/* Disable GFX parsing/prefetching */
+		gfx_v8_0_cp_gfx_enable(adev, false);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_inactive_hqd(adev, ring);
+		}
+		/* Disable MEC parsing/prefetching */
+		gfx_v8_0_cp_compute_enable(adev, false);
+	}
+
+	return 0;
+}
+
 static int gfx_v8_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6357,6 +6414,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
 	.check_soft_reset = gfx_v8_0_check_soft_reset,
+	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
 	.soft_reset = gfx_v8_0_soft_reset,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,

From 35d782feae7f0b817016315d8718a82c61968894 Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Fri, 15 Jul 2016 15:57:13 +0800
Subject: [PATCH 005/171] drm/amdgpu: add amdgpu soft reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Check the GPU status first: if MC/VMC/DISPLAY hang, directly trigger a
full reset. If an engine hangs, trigger an engine soft reset instead;
if the soft reset fails, fall back to a full reset.
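In rough pseudo-code the intended flow is as follows (an illustrative
sketch only; the helpers named here are the ones introduced by this
patch):

	if (!amdgpu_check_soft_reset(adev))
		return 0;	/* nothing is hung */

	need_full_reset = amdgpu_need_full_reset(adev);
	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev))
			need_full_reset = true;	/* soft reset failed */
	}
	if (need_full_reset) {
		/* suspend, amdgpu_asic_reset(), re-post the card, resume */
	}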
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 104 +++++++++++++++++---- drivers/gpu/drm/amd/include/amd_shared.h | 2 + 2 files changed, 88 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b88620586c8e..2bd2b19d4666 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1962,7 +1962,8 @@ int amdgpu_pre_soft_reset(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_block_status[i].valid) continue; - if (adev->ip_blocks[i].funcs->pre_soft_reset) { + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->pre_soft_reset) { r = adev->ip_blocks[i].funcs->pre_soft_reset(adev); if (r) return r; @@ -1972,6 +1973,58 @@ int amdgpu_pre_soft_reset(struct amdgpu_device *adev) return 0; } +static bool amdgpu_need_full_reset(struct amdgpu_device *adev) +{ + if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + return false; +} + +static int amdgpu_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->soft_reset) { + r = adev->ip_blocks[i].funcs->soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static int amdgpu_post_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->post_soft_reset) + r = adev->ip_blocks[i].funcs->post_soft_reset(adev); + if (r) + return r; + } + + return 0; +} + /** * amdgpu_gpu_reset - reset the asic * @@ -1984,6 +2037,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) { int i, r; int resched; + bool need_full_reset; if (!amdgpu_check_soft_reset(adev)) { DRM_INFO("No hardware hang detected. 
Did some blocks stall?\n"); @@ -2007,28 +2061,42 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(adev); - /* save scratch */ - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_suspend(adev); + need_full_reset = amdgpu_need_full_reset(adev); + + if (!need_full_reset) { + amdgpu_pre_soft_reset(adev); + r = amdgpu_soft_reset(adev); + amdgpu_post_soft_reset(adev); + if (r || amdgpu_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } + } + + if (need_full_reset) { + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); - } + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } - r = amdgpu_asic_reset(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_asic_reset(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume(adev); + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume(adev); + } + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); } - /* restore scratch */ - amdgpu_atombios_scratch_regs_restore(adev); if (!r) { r = amdgpu_ib_ring_tests(adev); if (r) { diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 2a2a5aa39b99..db710418f35f 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -165,6 +165,8 @@ struct amd_ip_funcs { int (*pre_soft_reset)(void *handle); /* soft reset the IP block */ int (*soft_reset)(void *handle); + /* post soft reset the IP block */ + int (*post_soft_reset)(void *handle); /* enable/disable cg for the IP block */ int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); From e4ae0fc3363191f31fb9627fff9f88d43523aac7 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 15 Jul 2016 16:24:25 +0800 Subject: [PATCH 006/171] drm/amdgpu: implement gfx8 post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 44 ++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2bd2b19d4666..1e553663e47d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1978,7 +1978,6 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang || 
adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 62ba7e550aee..af0efa250d80 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5229,6 +5229,49 @@ static int gfx_v8_0_soft_reset(void *handle) return 0; } +static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); + WREG32(mmCP_HQD_PQ_RPTR, 0); + WREG32(mmCP_HQD_PQ_WPTR, 0); + vi_srbm_select(adev, 0, 0, 0, 0); +} + +static int gfx_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) + gfx_v8_0_cp_gfx_resume(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_init_hqd(adev, ring); + } + gfx_v8_0_cp_compute_resume(adev); + } + gfx_v8_0_rlc_start(adev); + + return 0; +} + /** * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot * @@ -6416,6 +6459,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .check_soft_reset = gfx_v8_0_check_soft_reset, .pre_soft_reset = gfx_v8_0_pre_soft_reset, .soft_reset = gfx_v8_0_soft_reset, + .post_soft_reset = gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, }; From e702a68051b174e87df62bbc0204809b5c1ba3e5 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 13 Jul 2016 10:28:56 +0800 Subject: [PATCH 007/171] drm/amdgpu: implement sdma3 check/pre/post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 80 ++++++++++++++++++---- 3 files changed, 68 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b6e8e7a825f8..17600b477cf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1733,6 +1733,7 @@ struct amdgpu_sdma { struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; int num_instances; + uint32_t srbm_soft_reset; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1e553663e47d..47f29f9e6df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1978,7 +1978,6 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang || 
adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 653ce5ed55ae..3a63a4fcf807 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1320,27 +1320,78 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) || + (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) { srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; } + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; + adev->sdma.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; + adev->sdma.srbm_soft_reset = 0; + } + + return 0; +} + +static int sdma_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); + } + + return 0; +} + +static int sdma_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_gfx_resume(adev); + sdma_v3_0_rlc_resume(adev); + } + + return 0; +} + +static int sdma_v3_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1559,6 +1610,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .resume = sdma_v3_0_resume, .is_idle = sdma_v3_0_is_idle, .wait_for_idle = sdma_v3_0_wait_for_idle, + .check_soft_reset = sdma_v3_0_check_soft_reset, + .pre_soft_reset = sdma_v3_0_pre_soft_reset, + .post_soft_reset = sdma_v3_0_post_soft_reset, .soft_reset = sdma_v3_0_soft_reset, .set_clockgating_state = sdma_v3_0_set_clockgating_state, 
.set_powergating_state = sdma_v3_0_set_powergating_state, From 50b0197abfa062d05f5f14a94e04ed7fd45cb003 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 18 Jul 2016 16:59:24 +0800 Subject: [PATCH 008/171] drm/amdgpu: implement gmc8 check/pre/post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 59 ++++++++++++++++++++++----- 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 17600b477cf9..e4a731bc9fc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -677,6 +677,8 @@ struct amdgpu_mc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint32_t srbm_soft_reset; + struct amdgpu_mode_mc_save save; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 717359d3ba8c..0a23b8394705 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1092,9 +1092,8 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_check_soft_reset(void *handle) { - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1109,13 +1108,42 @@ static int gmc_v8_0_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; + adev->mc.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; + adev->mc.srbm_soft_reset = 0; + } + return 0; +} + +static int gmc_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_stop(adev, &adev->mc.save); + if (gmc_v8_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + return 0; +} + +static int gmc_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + srbm_soft_reset = adev->mc.srbm_soft_reset; if (srbm_soft_reset) { - gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle((void *)adev)) { - dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } - + u32 tmp; tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1131,14 +1159,22 @@ static int gmc_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); } return 0; } +static int gmc_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_resume(adev, &adev->mc.save); + return 0; +} + static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -1406,7 +1442,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .resume = gmc_v8_0_resume, .is_idle = gmc_v8_0_is_idle, .wait_for_idle = gmc_v8_0_wait_for_idle, + .check_soft_reset = 
gmc_v8_0_check_soft_reset, + .pre_soft_reset = gmc_v8_0_pre_soft_reset, .soft_reset = gmc_v8_0_soft_reset, + .post_soft_reset = gmc_v8_0_post_soft_reset, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, }; From 1015a1b1750e578868a96d812d388d3c65d7faaf Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 18 Jul 2016 17:02:57 +0800 Subject: [PATCH 009/171] drm/amdgpu: implement vi ih check/pre/post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1 + drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 49 +++++++++++++++++++++- 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 47f29f9e6df5..3640b124851e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1976,7 +1976,6 @@ int amdgpu_pre_soft_reset(struct amdgpu_device *adev) static bool amdgpu_need_full_reset(struct amdgpu_device *adev) { if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 7ef09352e534..f016464035b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -70,6 +70,7 @@ struct amdgpu_irq { /* gen irq stuff */ struct irq_domain *domain; /* GPU irq controller domain */ unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; + uint32_t srbm_soft_reset; }; void amdgpu_irq_preinstall(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index c92055805a45..d127d59f953a 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; + adev->irq.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; + adev->irq.srbm_soft_reset = 0; + } + + return 0; +} + +static int tonga_ih_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_fini(adev); +} + +static int tonga_ih_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_init(adev); +} + +static int tonga_ih_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if 
(!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + srbm_soft_reset = adev->irq.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = { .resume = tonga_ih_resume, .is_idle = tonga_ih_is_idle, .wait_for_idle = tonga_ih_wait_for_idle, + .check_soft_reset = tonga_ih_check_soft_reset, + .pre_soft_reset = tonga_ih_pre_soft_reset, .soft_reset = tonga_ih_soft_reset, + .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, }; From fc0b3b90b7e8c761372edc9d5661d0c4421ac116 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 18 Jul 2016 17:18:01 +0800 Subject: [PATCH 010/171] drm/amdgpu: implement UVD6 check/pre/post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 73 ++++++++++++++++++++-- 3 files changed, 70 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e4a731bc9fc1..108e04fc0987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1689,6 +1689,7 @@ struct amdgpu_uvd { bool address_64_bit; bool use_ctx_buf; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3640b124851e..309e58cd1263 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1977,7 +1977,6 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) { if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7f21102bfb99..4fa50918e886 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -754,14 +754,76 @@ static int uvd_v6_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v6_0_soft_reset(void *handle) +#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd +static int uvd_v6_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || + REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || + (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; + adev->uvd.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; + adev->uvd.srbm_soft_reset = 0; + } + return 0; +} +static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - uvd_v6_0_stop(adev); + if 
(!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; + + uvd_v6_0_stop(adev); + return 0; +} + +static int uvd_v6_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; + srbm_soft_reset = adev->uvd.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int uvd_v6_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); return uvd_v6_0_start(adev); @@ -966,7 +1028,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .resume = uvd_v6_0_resume, .is_idle = uvd_v6_0_is_idle, .wait_for_idle = uvd_v6_0_wait_for_idle, + .check_soft_reset = uvd_v6_0_check_soft_reset, + .pre_soft_reset = uvd_v6_0_pre_soft_reset, .soft_reset = uvd_v6_0_soft_reset, + .post_soft_reset = uvd_v6_0_post_soft_reset, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, }; From 115933a57779ec453a629331d08bb8e8d124f0d5 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 18 Jul 2016 17:38:50 +0800 Subject: [PATCH 011/171] drm/amdgpu: implement vce3 check/post_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 115 ++++++++++++++++++++++++-- 2 files changed, 109 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 108e04fc0987..e9364687cd95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1716,6 +1716,7 @@ struct amdgpu_vce { struct amdgpu_irq_src irq; unsigned harvest_config; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index c271abffd8dd..e5b18ad42721 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -37,6 +37,9 @@ #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -601,20 +604,115 @@ static int vce_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +#define AMDGPU_VCE_STATUS_BUSY_MASK 0x78 + +static int vce_v3_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp; + + /* VCE BUG: it is always busy, so skip its checking now */ + return 0; + + /* According to VCE team , we should use VCE_STATUS instead + * SRBM_STATUS.VCE_BUSY bit for busy status checking. 
+ * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE + * instance's registers are accessed + * (0 for 1st instance, 10 for 2nd instance). + * + *VCE_STATUS + *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | + *|----+----+-----------+----+----+----+----------+---------+----| + *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| + * + * VCE team suggest use bit 3--bit 6 for busy status check + */ + tmp = RREG32(mmGRBM_GFX_INDEX); + tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, tmp); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + tmp = RREG32(mmGRBM_GFX_INDEX); + tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, tmp); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + tmp = RREG32(mmGRBM_GFX_INDEX); + tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, tmp); + + if (adev->vce.harvest_config & (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) + srbm_soft_reset = 0; + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; + adev->vce.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; + adev->vce.srbm_soft_reset = 0; + } + return 0; +} + static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; + u32 srbm_soft_reset; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + srbm_soft_reset = adev->vce.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int vce_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, mask, - ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | - SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); mdelay(5); - return vce_v3_0_start(adev); + return vce_v3_0_suspend(adev); +} + + +static int vce_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + + mdelay(5); + + return vce_v3_0_resume(adev); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -751,7 +849,10 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { .resume = vce_v3_0_resume, .is_idle = vce_v3_0_is_idle, .wait_for_idle = vce_v3_0_wait_for_idle, + .check_soft_reset = vce_v3_0_check_soft_reset, + .pre_soft_reset = vce_v3_0_pre_soft_reset, .soft_reset = vce_v3_0_soft_reset, + .post_soft_reset = vce_v3_0_post_soft_reset, .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, }; From 81e04e1809057128abe1b996ca7bc0cbb38e56c9 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 20 Jul 2016 13:42:25 +0800 Subject: [PATCH 012/171] drm/amdgpu: add dce10 check_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index c1b04e9aab57..7f642b57223e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3141,11 +3141,26 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } +static int dce_v10_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (dce_v10_0_is_display_hung(adev)) + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; + else + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; + + return 0; +} + static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) + return 0; + if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3512,6 +3527,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = { .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, .wait_for_idle = dce_v10_0_wait_for_idle, + .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, From 7c204889dec5e745cdcc174388fa3aa824e75160 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 14 Dec 2015 13:18:01 +0100 Subject: [PATCH 013/171] drm/amdgpu: add new helper for in kernel allocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We often allocate, pin and map things at the same time in the kernel. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 63 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 ++ 2 files changed, 67 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f0873c75a25..93573586e2de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -211,6 +211,69 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, bo->placement.busy_placement = bo->placements; } +/** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); + return r; + } + + r = amdgpu_bo_reserve(*bo_ptr, false); + if (r) { + dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r); + goto error_free; + } + + r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo pin failed\n", r); + goto error_unreserve; + } + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo map failed\n", r); + goto error_unreserve; + } + } + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; + +error_unreserve: + amdgpu_bo_unreserve(*bo_ptr); + +error_free: + amdgpu_bo_unref(bo_ptr); + + return r; +} + int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index bdb01d932548..ae188a88082d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -139,6 +139,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct ttm_placement *placement, struct reservation_object *resv, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); From 78bbbd9c0fcde51b56c5bec99dd286cbad698712 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 14 Dec 2015 13:23:57 +0100 Subject: [PATCH 014/171] drm/amdgpu: pin shared GWS, GDS and OA resources MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit They can't move anyway, but just to be clean here. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 21 +++++++++------------ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 21 +++++++++------------ 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d869d058ef24..f4fbec3e224e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4465,24 +4465,21 @@ static int gfx_v7_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, - NULL, NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index af0efa250d80..ce0929bbf2e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2078,24 +2078,21 @@ static int gfx_v8_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, NULL, - NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, NULL, - NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, NULL, - NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; From cdb7e8f2733579e35ec2883bde7b594d07ad67e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 25 Jul 2016 17:56:18 +0200 Subject: [PATCH 015/171] drm/amdgpu: add more warning to amdgpu_bo_offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Warn when we try to get the address and the BO isn't locked or reserved. 
Signed-off-by: Christian König
Reviewed-by: Chunming Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 18 ++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 16 +---------------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 93573586e2de..67de19c46ddb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -737,3 +737,21 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 	else
 		reservation_object_add_excl_fence(resv, fence);
 }
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be useful to add check for this for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+		     !bo->pin_count);
+
+	return bo->tbo.offset;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index ae188a88082d..d650b42279e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -85,21 +85,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 	ttm_bo_unreserve(&bo->tbo);
 }
 
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo: amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	return bo->tbo.offset;
-}
-
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
 	return bo->tbo.num_pages << PAGE_SHIFT;
@@ -169,6 +154,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 
 /*
  * sub allocation

From 4b62e697777d79c88c61a2cdb826a6c413052a09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Mon, 25 Jul 2016 17:37:38 +0200
Subject: [PATCH 016/171] drm/amdgpu: use amdgpu_bo_create_kernel for the UVD BO
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Saves us some code.
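The open-coded create/reserve/pin/kmap/unreserve sequence collapses
into a single call, roughly (see the diff below for the exact error
handling):

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);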
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 31 +++---------------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8868d7..c22b64ec4ec1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; - r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, &adev->uvd.vcpu_bo); + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, + &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (r) { - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } - - r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, - &adev->uvd.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) UVD map failed\n", r); - return r; - } - - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, From c6f4439ce669f4ffaf5ac5254ad477befc03458a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 25 Jul 2016 18:06:30 +0200 Subject: [PATCH 017/171] drm/amdgpu: use amdgpu_bo_create_kernel in amdgpu_ih.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Saves us quite a bunch of code. 
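The matching teardown stays open-coded for now; a hedged sketch of that counterpart (example_bo_free_kernel is illustrative only, not an API added by this series):

static void example_bo_free_kernel(struct amdgpu_bo **bo, void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (amdgpu_bo_reserve(*bo, false) == 0) {
		if (cpu_addr && *cpu_addr) {
			amdgpu_bo_kunmap(*bo);
			*cpu_addr = NULL;
		}
		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);
}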
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 27 +++++--------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 534fc04e80fd..5ebb3f43feb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) /* Allocate ring buffer */ if (adev->irq.ih.ring_obj == NULL) { - r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &adev->irq.ih.ring_obj); + r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->irq.ih.ring_obj, + &adev->irq.ih.gpu_addr, + (void **)&adev->irq.ih.ring); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; } - r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->irq.ih.ring_obj, - AMDGPU_GEM_DOMAIN_GTT, - &adev->irq.ih.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->irq.ih.ring_obj, - (void **)&adev->irq.ih.ring); - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - if (r) { - DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r); - return r; - } } return 0; } From 37ac235bf8c8a44c6c5aa8b03ab3d5ad23f64cef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 26 Jul 2016 09:58:45 +0200 Subject: [PATCH 018/171] drm/amdgpu: use amdgpu_bo_create_kernel in amdgpu_ring.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Saves us quite a bunch of code. 
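The mapped buffer is then consumed through ring->ptr_mask, computed below as (ring->ring_size / 4) - 1. Because the size in dwords is a power of two, wrap-around is a single AND; a hypothetical emit helper, for illustration only (not the driver's actual write path):

static void example_ring_emit(struct amdgpu_ring *ring, u32 v)
{
	/* store one dword, then advance and wrap the write pointer */
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
}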
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 27 +++++------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 85aeb0a804bb..242ba04bfde6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &ring->ring_obj); + r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; } - r = amdgpu_bo_reserve(ring->ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT, - &ring->gpu_addr); - if (r) { - amdgpu_bo_unreserve(ring->ring_obj); - dev_err(adev->dev, "(%d) ring pin failed\n", r); - return r; - } - r = amdgpu_bo_kmap(ring->ring_obj, - (void **)&ring->ring); - memset((void *)ring->ring, 0, ring->ring_size); - - amdgpu_bo_unreserve(ring->ring_obj); - if (r) { - dev_err(adev->dev, "(%d) ring map failed\n", r); - return r; - } } ring->ptr_mask = (ring->ring_size / 4) - 1; ring->max_dw = max_dw; From d31d3c28c77e25990ea295c1536253043c7b255c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 21 Jun 2016 10:26:26 +0800 Subject: [PATCH 019/171] drm/amd/powerplay: add SMU71 header files for iceland (v2) v2: cleanup headers, add copyright Signed-off-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu71.h | 510 ++++++++++++++ .../drm/amd/powerplay/inc/smu71_discrete.h | 631 ++++++++++++++++++ 2 files changed, 1141 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h new file mode 100644 index 000000000000..71c9b2d28640 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h @@ -0,0 +1,510 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef SMU71_H +#define SMU71_H + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__VARIANT__ICELAND 1 +#define SMU__DGPU_ONLY 1 +#define SMU__DYNAMIC_MCARB_SETTINGS 1 + +enum SID_OPTION { + SID_OPTION_HI, + SID_OPTION_LO, + SID_OPTION_COUNT +}; + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU71_MAX_LEVELS_VDDC 8 +#define SMU71_MAX_LEVELS_VDDCI 4 +#define SMU71_MAX_LEVELS_MVDD 4 +#define SMU71_MAX_LEVELS_VDDNB 8 + +#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU71_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< Date: Mon, 20 Jun 2016 00:42:01 +0800 Subject: [PATCH 020/171] drm/amd/powerplay: add iceland SMU mananger The system management unit (SMU) is a subcomponent of the northbridge that is responsible for a variety of system and power management tasks during boot and runtime for GPU. In powerplay, it will be used on firmware loading and power task management. This patch adds SMU mananger for iceland. Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 3 +- .../drm/amd/powerplay/smumgr/iceland_smumgr.c | 713 ++++++++++++++++++ .../drm/amd/powerplay/smumgr/iceland_smumgr.h | 64 ++ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 4 + 4 files changed, 783 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index f10fb64ef981..19e79469f6bc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,7 +2,8 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ + polaris10_smumgr.o iceland_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c new file mode 100644 index 000000000000..f50658332d9d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -0,0 +1,713 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui + * + */ +#include +#include +#include +#include + +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "ppsmc.h" +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" +#include "cgs_common.h" + +#define ICELAND_SMC_SIZE 0x20000 +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /*128 *1024*/ + +/** + * Set the address for reading/writing the SMC SRAM space. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + */ +static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t limit) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), + "SMC address must be 4 byte aligned.", + return -1;); + + PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), + "SMC address is beyond the SMC RAM area.", + return -1;); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +/** + * Copy bytes from an array into the SMC RAM space. + * + * @param smumgr the address of the powerplay SMU manager. + * @param smcStartAddress the start address in the SMC RAM to copy bytes to. + * @param src the byte array to copy the bytes from. + * @param byteCount the number of bytes to copy. 
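+ *
+ * Example (hypothetical values): copy a small blob to a 4-byte-aligned
+ * offset, with the limit guarding the end of SMC RAM:
+ *
+ *   static const uint8_t blob[6] = { 1, 2, 3, 4, 5, 6 };
+ *   iceland_copy_bytes_to_smc(smumgr, 0x1000, blob, sizeof(blob),
+ *                             ICELAND_SMC_SIZE);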
+ */ +int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, const uint8_t *src, + uint32_t byteCount, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), + "SMC address must be 4 byte aligned.", + return 0;); + + PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), + "SMC address is beyond the SMC RAM area.", + return 0;); + + addr = smcStartAddress; + + while (byteCount >= 4) { + /* + * Bytes are written into the + * SMC address space with the MSB first + */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byteCount -= 4; + addr += 4; + } + + if (0 != byteCount) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + orig_data = cgs_read_register(smumgr->device, + mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byteCount); + + while (byteCount > 0) { + data = (data << 8) + *src++; + byteCount--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + } + +out: + return result; +} + +/** + * Deassert the reset'pin' (set it to high). + * + * @param smumgr the address of the powerplay hardware manager. + */ +static int iceland_start_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + return 0; +} + +static void iceland_pp_reset_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); +} + +int iceland_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; + + iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); + + return 0; +} + +/** + * Return if the SMC is currently running. + * + * @param smumgr the address of the powerplay hardware manager. + */ +bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + uint32_t val1, val2; + + val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable); + val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_PC_C); + + return ((0 == val1) && (0x20100 <= val2)); +} + +/** + * Send a message to the SMC, and wait for its response. + * + * @param smumgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return The response that came from the SMC. 
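+ *
+ * Example (hypothetical, PPSMC_MSG_Test assumed from ppsmc.h): a
+ * blocking no-argument query, where a zero return means the SMC
+ * posted its response in SMC_RESP_0:
+ *
+ *   if (iceland_send_msg_to_smc(smumgr, PPSMC_MSG_Test) == 0)
+ *       pr_info("[ powerplay ] SMC is alive\n");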
+ */ +static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + if (!iceland_is_smc_ram_running(smumgr)) + return -EINVAL; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Previous Message.", + ); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Message.", + ); + + return 0; +} + +/** + * Send a message to the SMC with parameter + * + * @param smumgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send + * @return The response that came from the SMC. + */ +static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return iceland_send_msg_to_smc(smumgr, msg); +} + +/* + * Read a 32bit value from the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value and output parameter for the data read from the SMC SRAM. + */ +int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t *value, + uint32_t limit) +{ + int result; + + result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + + return 0; +} + +/* + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. 
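+ *
+ * Example (hypothetical): read-modify-write of a single dword using
+ * the two helpers above:
+ *
+ *   uint32_t v;
+ *   if (!iceland_read_smc_sram_dword(smumgr, addr, &v, limit))
+ *       iceland_write_smc_sram_dword(smumgr, addr, v | 0x1, limit);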
+ */ +int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t value, + uint32_t limit) +{ + int result; + + result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); + + return 0; +} + +static int iceland_smu_fini(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + + if (smumgr->backend != NULL) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + +/** + * Convert the PPIRI firmware type to SMU type mask. + * For MEC, we need to check all MEC related type + */ +static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType) +{ + uint16_t result = 0; + + switch (firmwareType) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + break; + } + + return result; +} + +/** + * Check if the FW has been loaded, + * SMU will not return if loading has not finished. 
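+ *
+ * Example (hypothetical): poll until the RLC image has been consumed:
+ *
+ *   if (iceland_check_fw_load_finish(smumgr, UCODE_ID_RLC_G))
+ *       return -EINVAL;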
+*/ +static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) +{ + uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType); + + if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) { + pr_err("[ powerplay ] check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +/* Populate one firmware image to the data structure */ +static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint16_t firmware_type, + struct SMU_Entry *pentry) +{ + int result; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info( + smumgr->device, + iceland_convert_fw_type_to_cgs(firmware_type), + &info); + + if (result == 0) { + pentry->version = 0; + pentry->id = (uint16_t)firmware_type; + pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); + pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); + pentry->meta_data_addr_high = 0; + pentry->meta_data_addr_low = 0; + pentry->data_size_byte = info.image_size; + pentry->num_register_entries = 0; + + if (firmware_type == UCODE_ID_RLC_G) + pentry->flags = 1; + else + pentry->flags = 0; + } else { + return result; + } + + return result; +} + +static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 1); +} + +static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 0); +} + +int iceland_smu_start_smc(struct pp_smumgr *smumgr) +{ + /* set smc instruct start point at 0x0 */ + iceland_program_jump_on_start(smumgr); + + /* enable smc clock */ + iceland_start_smc_clock(smumgr); + + /* de-assert reset */ + iceland_start_smc(smumgr); + + SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + +/** + * Upload the SMC firmware to the SMC microcontroller. + * + * @param smumgr the address of the powerplay hardware manager. + * @param pFirmware the data structure containing the various sections of the firmware. 
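+ *
+ * Example (hypothetical): upload the image, then bring the SMC out of
+ * reset:
+ *
+ *   if (!iceland_smu_upload_firmware_image(smumgr))
+ *       iceland_smu_start_smc(smumgr);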
+ */ +int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + const uint8_t *src; + uint32_t byte_count, val; + uint32_t data; + struct cgs_firmware_info info = {0}; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + /* load SMC firmware */ + cgs_get_firmware_info(smumgr->device, + iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > ICELAND_SMC_SIZE) { + pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + /* wait for smc boot up */ + SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /* clear firmware interrupt enable flag */ + val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL); + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL, val | 1); + + /* stop smc clock */ + iceland_pp_stop_smc_clock(smumgr); + + /* reset smc */ + iceland_pp_reset_smc(smumgr); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, + info.ucode_start_address); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 1); + + byte_count = info.image_size; + src = (const uint8_t *)info.kptr; + + while (byte_count >= 4) { + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = + (struct iceland_smumgr *)(smumgr->backend); + uint16_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + + toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), 
+ "Failed to Get Firmware Entry.\n", return -1); + + if (!iceland_is_smc_ram_running(smumgr)) { + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + } + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_HI, + iceland_smu->header_buffer.mc_addr_high); + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_LO, + iceland_smu->header_buffer.mc_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + + PP_ASSERT_WITH_CODE( + 0 == iceland_send_msg_to_smc_with_parameter( + smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), + "Fail to Request SMU Load uCode", return 0); + + return result; +} + +static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, + uint32_t firmwareType) +{ + return 0; +} + +static int iceland_start_smu(struct pp_smumgr *smumgr) +{ + int result; + + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + + result = iceland_request_smu_reload_fw(smumgr); + + return result; +} + +/** + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. + */ +static int iceland_smu_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu; + uint64_t mc_addr = 0; + + /* Allocate memory for backend private data */ + iceland_smu = (struct iceland_smumgr *)(smumgr->backend); + iceland_smu->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + + smu_allocate_memory(smumgr->device, + iceland_smu->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &iceland_smu->header_buffer.kaddr, + &iceland_smu->header_buffer.handle); + + iceland_smu->pHeader = iceland_smu->header_buffer.kaddr; + iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)iceland_smu->header_buffer.handle); + return -1); + + return 0; +} + +static const struct pp_smumgr_func iceland_smu_funcs = { + .smu_init = &iceland_smu_init, + .smu_fini = &iceland_smu_fini, + .start_smu = &iceland_start_smu, + .check_fw_load_finish = &iceland_check_fw_load_finish, + .request_smu_load_fw = &iceland_request_smu_reload_fw, + .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, + .send_msg_to_smc = &iceland_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; + +int iceland_smum_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = NULL; + + iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); + + if (iceland_smu == NULL) + return -ENOMEM; + + smumgr->backend = iceland_smu; + smumgr->smumgr_funcs = &iceland_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h 
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h new file mode 100644 index 000000000000..62009a7ae827 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -0,0 +1,64 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui + * + */ + +#ifndef _ICELAND_SMUMGR_H_ +#define _ICELAND_SMUMGR_H_ + +struct iceland_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +/* Iceland only has header_buffer, don't have smu buffer. */ +struct iceland_smumgr { + uint8_t *pHeader; + uint8_t *pMecImage; + uint32_t ulSoftRegsStart; + + struct iceland_buffer_entry header_buffer; +}; + +extern int iceland_smum_init(struct pp_smumgr *smumgr); +extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, + const uint8_t *src, + uint32_t byteCount, uint32_t limit); + +extern int iceland_smu_start_smc(struct pp_smumgr *smumgr); + +extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t *value, uint32_t limit); +extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t value, uint32_t limit); + +extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr); +extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 7723473e51a0..cf3cabee8918 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -30,6 +30,7 @@ #include "linux/delay.h" #include "cz_smumgr.h" #include "tonga_smumgr.h" +#include "iceland_smumgr.h" #include "fiji_smumgr.h" #include "polaris10_smumgr.h" @@ -58,6 +59,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { + case CHIP_TOPAZ: + iceland_smum_init(smumgr); + break; case CHIP_TONGA: tonga_smum_init(smumgr); break; From d550df0b60d444e446afba00c41894bf350de4c0 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 12 Jul 2016 00:52:24 +0800 Subject: [PATCH 021/171] drm/amd/powerplay: add iceland_dyn_defaults header Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../powerplay/hwmgr/iceland_dyn_defaults.h | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 
100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h new file mode 100644 index 000000000000..a7b4bc6caea2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h @@ -0,0 +1,41 @@ +#ifndef ICELAND_DYN_DEFAULTS_H +#define ICELAND_DYN_DEFAULTS_H + +enum ICELANDdpm_TrendDetection +{ + ICELANDdpm_TrendDetection_AUTO, + ICELANDdpm_TrendDetection_UP, + ICELANDdpm_TrendDetection_DOWN +}; +typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection; + + +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 + + +#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200 + +#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0 + +#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8 + +#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 + +#define PPICELAND_REFERENCEDIVIDER_DFLT 4 + +#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035 +#define PPICELAND_CGULVCONTROL_DFLT 0x00007450 +#define PPICELAND_TARGETACTIVITY_DFLT 30 +#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10 + +#endif + From 025f8bfb84cbcaa78df31ab00d7e3c5f979e9e27 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 12 Jul 2016 15:45:12 +0800 Subject: [PATCH 022/171] drm/amd/powerplay: add iceland HW manager This patch introduces the iceland HW manager of powerplay which includes HW manager, clockpowergating, thermal, and powertune. 
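The hardware manager below leans heavily on PP_ASSERT_WITH_CODE(); assuming the usual pp_debug.h semantics (log the message and run the recovery statement when the condition is false), a typical guard from this patch reads:

	tmp_result = iceland_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			    "Failed to read clock registers!",
			    result = tmp_result);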
Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 4 + .../hwmgr/iceland_clockpowergating.c | 119 + .../hwmgr/iceland_clockpowergating.h | 38 + .../drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 5617 +++++++++++++++++ .../drm/amd/powerplay/hwmgr/iceland_hwmgr.h | 424 ++ .../amd/powerplay/hwmgr/iceland_powertune.c | 491 ++ .../amd/powerplay/hwmgr/iceland_powertune.h | 74 + .../drm/amd/powerplay/hwmgr/iceland_thermal.c | 595 ++ .../drm/amd/powerplay/hwmgr/iceland_thermal.h | 58 + 10 files changed, 7423 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f7ce4cb71346..c264cb66b020 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -10,7 +10,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ fiji_clockpowergating.o fiji_thermal.o \ polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o + polaris10_clockpowergating.o iceland_hwmgr.o \ + iceland_clockpowergating.o iceland_thermal.o \ + iceland_powertune.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 27e07624ac28..65408dd22a79 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -39,6 +39,7 @@ extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { @@ -67,6 +68,9 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { + case CHIP_TOPAZ: + iceland_hwmgr_init(hwmgr); + break; case CHIP_TONGA: tonga_hwmgr_init(hwmgr); break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c new file mode 100644 index 000000000000..47949f5cd073 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c @@ -0,0 +1,119 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui + * + */ + +#include "hwmgr.h" +#include "iceland_clockpowergating.h" +#include "ppsmc.h" +#include "iceland_hwmgr.h" + +int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum + PHM_AsicBlock block, enum PHM_ClockGateSetting gating) +{ + int ret = 0; + + switch (block) { + case PHM_AsicBlock_UVD_MVC: + case PHM_AsicBlock_UVD: + case PHM_AsicBlock_UVD_HD: + case PHM_AsicBlock_UVD_SD: + if (gating == PHM_ClockGateSetting_StaticOff) + ret = iceland_phm_powerdown_uvd(hwmgr); + else + ret = iceland_phm_powerup_uvd(hwmgr); + break; + case PHM_AsicBlock_GFX: + default: + break; + } + + return ret; +} + +int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + + iceland_phm_powerup_uvd(hwmgr); + iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) { + iceland_update_uvd_dpm(hwmgr, true); + iceland_phm_powerdown_uvd(hwmgr); + } else { + iceland_phm_powerup_uvd(hwmgr); + iceland_update_uvd_dpm(hwmgr, false); + } + + return 0; +} + +int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) + return iceland_phm_powerdown_vce(hwmgr); + else + return iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id) +{ + /* iceland does not have MM hardware block */ + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h new file mode 100644 index 000000000000..ff5ef00c7c68 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui + * + */ + +#ifndef _ICELAND_CLOCK_POWER_GATING_H_ +#define _ICELAND_CLOCK_POWER_GATING_H_ + +#include "iceland_hwmgr.h" +#include "pp_asicblocks.h" + +extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); +extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); +#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c new file mode 100644 index 000000000000..9c6d7e3d5752 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -0,0 +1,5617 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui + * + */ +#include +#include +#include +#include "linux/delay.h" +#include "pp_acpi.h" +#include "hwmgr.h" +#include +#include "iceland_hwmgr.h" +#include "pptable.h" +#include "processpptables.h" +#include "pp_debug.h" +#include "ppsmc.h" +#include "cgs_common.h" +#include "pppcielanes.h" +#include "iceland_dyn_defaults.h" +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "iceland_clockpowergating.h" +#include "iceland_thermal.h" +#include "iceland_powertune.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "cgs_linux.h" +#include "eventmgr.h" +#include "amd_pcie_helpers.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + +#define SMC_RAM_END 0x40000 +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic); + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ + DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ + DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ +}; + +static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + 
cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + + return 0; +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_get_memory_type(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + /* iceland does not have MM hardware blocks */ + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + + +static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = iceland_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = iceland_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = iceland_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = iceland_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = iceland_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control; +} + +/* + * -------------- Voltage Tables ---------------------- + * If the voltage table would be bigger than what will fit into the + * state table on the SMC keep only the higher entries. 
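+ *
+ * Worked example (assuming entries are sorted by ascending voltage):
+ * with 10 entries and an 8-level SMC table, diff = 2, entries[2..9]
+ * slide down to entries[0..7], and the two lowest steps are dropped.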
+ */ + +static void iceland_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_voltage_steps, + pp_atomctrl_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) { + return; + } + + diff = voltage_table->count - max_voltage_steps; + + for (i = 0; i < max_voltage_steps; i++) { + voltage_table->entries[i] = voltage_table->entries[i + diff]; + } + + voltage_table->count = max_voltage_steps; + + return; +} + +/** + * Enable voltage control + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *voltage_dependency_table, + pp_atomctrl_voltage_table *voltage_table) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Dependency Table empty.", return -EINVAL;); + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + voltage_table->count = voltage_dependency_table->count; + + for (i = 0; i < voltage_dependency_table->count; i++) { + voltage_table->entries[i].value = + voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +/** + * Create Voltage Tables. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result; + + /* GPIO voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDC table.", return result;); + } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + /* SVI2 VDDC voltage */ + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)), + "Too many voltage values for VDDC. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); + ); + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", return result;); + } + + /* SVI2 VDDCI voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + &data->vddci_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)), + "Too many voltage values for VDDCI. 
Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); + ); + + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve table.", return result;); + } + + /* SVI2 voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + &data->mvdd_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependancy table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)), + "Too many voltage values for MVDD. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); + ); + + return 0; +} + +/*---------------------------MC----------------------------*/ + +uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) +{ + bool result = true; + + switch (inReg) { + case mmMC_SEQ_RAS_TIMING: + *outReg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *outReg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *outReg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *outReg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *outReg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *outReg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *outReg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *outReg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *outReg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *outReg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *outReg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *outReg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *outReg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *outReg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? 
address : table->mc_reg_address[i].s1; + } + return 0; +} + +int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -1); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. need to set these data for each clock range + * + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + switch (table->mc_reg_address[i].s1) { + /* + * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write + * to mmMC_PMG_CMD_EMRS/_LP[15:0]. 
Bit[15:0] MRS, need
+	 * to be updated in mmMC_PMG_CMD_MRS/_LP[15:0]
+	 */
+	case mmMC_SEQ_MISC1:
+		temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+		table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+		table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+		for (k = 0; k < table->num_entries; k++) {
+			table->mc_reg_table_entry[k].mc_data[j] =
+				((temp_reg & 0xffff0000)) |
+				((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+		}
+		j++;
+		PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -1);
+
+		temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+		table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+		table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+		for (k = 0; k < table->num_entries; k++) {
+			table->mc_reg_table_entry[k].mc_data[j] =
+				(temp_reg & 0xffff0000) |
+				(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+			if (!data->is_memory_GDDR5) {
+				table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+		}
+		j++;
+		PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -1);
+
+		if (!data->is_memory_GDDR5) {
+			table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+			table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -1);
+		}
+
+		break;
+
+	case mmMC_SEQ_RESERVE_M:
+		temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+		table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+		table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+		for (k = 0; k < table->num_entries; k++) {
+			table->mc_reg_table_entry[k].mc_data[j] =
+				(temp_reg & 0xffff0000) |
+				(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+		}
+		j++;
+		PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -1);
+		break;
+
+	default:
+		break;
+	}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+
+static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table)
+{
+	uint8_t i, j;
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+				table->mc_reg_table_entry[j].mc_data[i]) {
+				table->validflag |= (1<<i);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_mc_reg_table *table;
+	phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table;
+	uint8_t module_index = iceland_get_memory_module_index(hwmgr);
+
+	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+	if (NULL == table)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+	if (0 == result)
+		result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+	if (0 == result) {
+		iceland_set_s0_mc_reg_index(ni_table);
+		result = iceland_set_mc_special_registers(hwmgr, ni_table);
+	}
+
+	if (0 == result)
+		iceland_set_valid_flag(ni_table);
+
+	kfree(table);
+	return result;
+}
+
+/**
+ * Programs static screen detection parameters
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	/* Set static screen threshold unit */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+		data->static_screen_threshold_unit);
+	/* Set static screen threshold */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+		data->static_screen_threshold);
+
+	return 0;
+}
+
+/**
+ * Setup display gap for glitch free memory clock switching.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+int iceland_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
+					CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+
+	display_gap = PHM_SET_FIELD(display_gap,
+					CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
+
+	display_gap = PHM_SET_FIELD(display_gap,
+					CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+		ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	return 0;
+}
+
+/**
+ * Programs activity state transition voting clients
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
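+ * Note: the body below simply clears the SCLK counter resets and then
+ * loads CG_FREQ_TRAN_VOTING_0..7 from the voting_rights_clients0..7
+ * values cached in the backend, so the individual client blocks can
+ * vote on SCLK frequency transitions.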
+ * @return always 0 + */ +int iceland_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +static int iceland_upload_firmware(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_upload_firmware_image(hwmgr->smumgr); + + return ret; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + uint32_t tmp; + int result; + bool error = 0; + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, DpmTable), + &tmp, data->sram_end); + + if (0 == result) { + data->dpm_table_start = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, SoftRegisters), + &tmp, data->sram_end); + + if (0 == result) { + data->soft_regs_start = tmp; + } + + error |= (0 != result); + + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcRegisterTable), + &tmp, data->sram_end); + + if (0 == result) { + data->mc_reg_table_start = tmp; + } + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, FanTable), + &tmp, data->sram_end); + + if (0 == result) { + data->fan_table_start = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), + &tmp, data->sram_end); + + if (0 == result) { + data->arb_table_start = tmp; + } + + error |= (0 != result); + + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, Version), + &tmp, data->sram_end); + + if (0 == result) { + hwmgr->microcode_version_info.SMC = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, UlvSettings), + &tmp, 
data->sram_end); + + if (0 == result) { + data->ulv_settings_start = tmp; + } + + error |= (0 != result); + + return error ? 1 : 0; +} + +/* +* Copy one arb setting to another and then switch the active set. +* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. +*/ +int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arbFreqSrc, uint32_t arbFreqDest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arbFreqSrc) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + + default: + return -1; + } + + switch (arbFreqDest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + + default: + return -1; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); + + return 0; +} + +/** + * Initial switch from ARB F0->F1 + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) +{ + return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ + + +static int iceland_reset_single_dpm_table( + struct pp_hwmgr *hwmgr, + struct iceland_single_dpm_table *dpm_table, + uint32_t count) +{ + uint32_t i; + if (!(count <= MAX_REGULAR_DPM_NUMBER)) + printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ + table entries to exceed max number! 
\n"); + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { + dpm_table->dpm_levels[i].enabled = 0; + } + + return 0; +} + +static void iceland_setup_pcie_table_entry( + struct iceland_single_dpm_table *dpm_table, + uint32_t index, uint32_t pcie_gen, + uint32_t pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = 1; +} + +/* + * Set up the PCIe DPM table as follows: + * + * A = Performance State, Max, Gen Speed + * C = Performance State, Min, Gen Speed + * 1 = Performance State, Max, Lane # + * 3 = Performance State, Min, Lane # + * + * B = Power Saving State, Max, Gen Speed + * D = Power Saving State, Min, Gen Speed + * 2 = Power Saving State, Max, Lane # + * 4 = Power Saving State, Min, Lane # + * + * + * DPM Index Gen Speed Lane # + * 5 A 1 + * 4 B 2 + * 3 C 1 + * 2 D 2 + * 1 C 3 + * 0 D 4 + * + */ +static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || + data->use_pcie_power_saving_levels), + "No pcie performance levels!", return -EINVAL); + + if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK); + + /* Hardcode Pcie Table */ + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + data->dpm_table.pcie_speed_table.count = 6; + + return 0; + +} + + +/* + * This function is to initalize all DPM state tables for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these state tables to the allowed range based + * on the power policy or external client requests, such as UVD request, etc. 
+ */ +static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t i; + + struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table = + hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_cac_leakage_table *std_voltage_table = + hwmgr->dyn_state.cac_leakage_table; + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, + "SCLK dependency table has to have is missing. This table is mandatory", return -1); + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, + "VMCLK dependency table has to have is missing. This table is mandatory", return -1); + + /* clear the state table to reset everything to default */ + memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD); + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Sclk DPM table based on allow Sclk values*/ + data->dpm_table.sclk_table.count = 0; + + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != + allowed_vdd_sclk_table->entries[i].clk) { + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + allowed_vdd_sclk_table->entries[i].clk; + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.count++; + } + } + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != + allowed_vdd_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + allowed_vdd_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.count++; + } + } + + /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. 
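+	 * param1 of each level carries the corresponding standard (leakage)
+	 * voltage taken from the cac_leakage_table.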
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; + } + + allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* + * Initialize MVDD DPM table based on allow Mclk + * values + */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + /* setup PCIE gen speed levels*/ + iceland_setup_default_pcie_tables(hwmgr); + + /* save a copy of the default DPM table*/ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table)); + + return 0; +} + +/** + * @brief PhwIceland_GetVoltageOrder + * Returns index of requested voltage record in lookup(table) + * @param hwmgr - pointer to hardware manager + * @param lookutab - lookup list to search in + * @param voltage - voltage to look for + * @return 0 on success + */ +uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, + uint16_t voltage) +{ + uint8_t count = (uint8_t) (look_up_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage equal or bigger than requested */ + if (look_up_table->entries[i].us_vdd >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i-1; +} + + +static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + /* SCLK/VDDC Dependency Table has to exist. */ + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + /* + * Since voltage in the sclk/vddc dependency table is not + * necessarily in ascending order because of ELB voltage + * patching, loop through entire list to find exact voltage. 
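+	 * If no exact match is found, a second pass below falls back to the
+	 * first entry with an equal or higher voltage.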
+ */ + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + /* + * If voltage is not found in the first pass, loop again to + * find the best match, equal or higher value. + */ + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU71_Discrete_VoltageLevel *smc_voltage_tab) { + int result; + + + result = iceland_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (0 != result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + + return 0; +} + +/** + * Vddc table preparation for SMC. 
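+ * Every entry of the backend vddc_voltage_table is copied into the SMC
+ * DPM table; the Smio field is only populated under GPIO voltage control.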
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->vddc_voltage_table.entries[count], + &table->VddcLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +/** + * Vddci table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + uint32_t count; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->VddciLevelCount = data->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->vddci_voltage_table.entries[count], + &table->VddciLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. 
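+ * Mirrors the VDDC/VDDCI helpers above, here for the MVDD rail.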
+ * @return 0 + */ +static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + uint32_t count; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->MvddLevelCount = data->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->mvdd_voltage_table.entries[count], + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + +/** + * Convert a voltage value in mv unit to VID number required by SMU firmware + */ +static uint8_t convert_to_vid(uint16_t vddc) +{ + return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); +} + +int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint8_t * hi_vid = data->power_tune_table.BapmVddCVidHiSidd; + uint8_t * lo_vid = data->power_tune_table.BapmVddCVidLoSidd; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + } + } else { + PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL); + } + + return 0; +} + +int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint8_t *vid = data->power_tune_table.VddCVid; + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) { + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + } + + return 0; +} + +/** + * Preparation of voltage tables for SMC. 
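+ * Aggregates the three per-rail helpers above; any failure is reported
+ * through PP_ASSERT_WITH_CODE and returned as -1.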
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + + result = iceland_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -1); + + result = iceland_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -1); + + result = iceland_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -1); + + return 0; +} + + +/** + * Re-generate the DPM level mask value + * @param hwmgr the address of the hardware manager + */ +static uint32_t iceland_get_dpm_level_enable_mask_value( + struct iceland_single_dpm_table * dpm_table) +{ + uint32_t i; + uint32_t mask_value = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + return mask_value; +} + +int iceland_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result = 0; + SMU71_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = iceland_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU71_Discrete_MCArbDramTimingTable), + data->sram_end + ); + } + + return result; +} + +static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
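+	 * This is why the loop below runs to count inclusive, filling
+	 * count + 1 link levels in total.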
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage) +{ + uint8_t count = (uint8_t) (voltage_table->count); + uint8_t i = 0; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), + "Voltage Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage bigger than requested */ + if (voltage_table->entries[i].value >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + + +static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *tab) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + tab->SVI2Enable |= VDDC_ON_SVI2; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) + tab->SVI2Enable |= VDDCI_ON_SVI2; + else + tab->MergedVddci = 1; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) + tab->SVI2Enable |= MVDD_ON_SVI2; + + PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && + (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); + + return 0; +} + +static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *vol = allowed_clock_voltage_table->entries[i - 1].v; + + return 0; +} + +static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if 
(memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, 
mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_GDDR5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static int iceland_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *memory_level + ) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result = 0; + bool dllStateOn; + struct cgs_display_info info = {0}; + + + if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) { + memory_level->MinVddci = memory_level->MinVddc; + } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + 
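+	/* Default up/down hysteresis for this memory level; the activity
+	 * target below comes from the backend's mclk_activity_target. */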
memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + //if ((data->mclk_stutter_mode_threshold != 0) && + // (memory_clock <= data->mclk_stutter_mode_threshold) && + // (data->is_uvd_enabled == 0) + // && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + // && (data->display_timing.num_existing_displays <= 2) + // && (data->display_timing.num_existing_displays != 0)) + // memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && + (memory_clock <= data->mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_GDDR5) { + memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((data->mclk_edc_enable_threshold != 0) && + (memory_clock > data->mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((data->mclk_edc_wr_enable_threshold != 0) && + (memory_clock > data->mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dllStateOn = data->dll_defaule_on; + } + } else { + memory_level->StrobeRatio = + iceland_get_ddr3_mclk_frequency_ratio(memory_clock); + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = iceland_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates the SMC MVDD structure using the provided memory clock. + * + * @param hwmgr the address of the hardware manager + * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. + * @param voltage the SMC VOLTAGE structure to be populated + */ +int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -1); + + } else { + return -1; + } + + return 0; +} + + +static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + SMU71_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 
0 : 1; + + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ + + if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + 
+        PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+    table->MemoryACPILevel.MpllFuncCntl_1 =
+        PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+    table->MemoryACPILevel.MpllFuncCntl_2 =
+        PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+    table->MemoryACPILevel.MpllSs1 =
+        PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+    table->MemoryACPILevel.MpllSs2 =
+        PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+    table->MemoryACPILevel.EnabledForThrottle = 0;
+    table->MemoryACPILevel.EnabledForActivity = 0;
+    table->MemoryACPILevel.UpHyst = 0;
+    table->MemoryACPILevel.DownHyst = 100;
+    table->MemoryACPILevel.VoltageDownHyst = 0;
+    /* Indicates maximum activity level for this performance level. */
+    table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+    table->MemoryACPILevel.StutterEnable = 0;
+    table->MemoryACPILevel.StrobeEnable = 0;
+    table->MemoryACPILevel.EdcReadEnable = 0;
+    table->MemoryACPILevel.EdcWriteEnable = 0;
+    table->MemoryACPILevel.RttEnable = 0;
+
+    return result;
+}
+
+static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
+{
+    int result = -EINVAL;
+    uint32_t i;
+
+    for (i = 0; i < table->count; i++) {
+        if (value == table->dpm_levels[i].value) {
+            *boot_level = i;
+            result = 0;
+        }
+    }
+    return result;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr        the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk         the SMC SCLK structure to be populated
+ */
+int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+        uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+    const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+    pp_atomctrl_clock_dividers_vi dividers;
+    uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+    uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+    uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+    uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t reference_clock;
+    uint32_t reference_divider;
+    uint32_t fbdiv;
+    int result;
+
+    /* get the engine clock dividers for this clock value */
+    result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+    PP_ASSERT_WITH_CODE(result == 0,
+        "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+    /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+    reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+    reference_divider = 1 + dividers.uc_pll_ref_div;
+
+    /* the low 14 bits are the fraction, the high 12 bits the divider */
+    fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+    /* SPLL_FUNC_CNTL setup */
+    spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+            CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+    spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+            CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+    /* SPLL_FUNC_CNTL_3 setup */
+    spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+            CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+    /* set to use fractional accumulation */
+    spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+            CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
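+        /*
+         * A sketch of the arithmetic below, using hypothetical numbers
+         * (a 27 MHz reference, so reference_clock = 2700 in 10 kHz
+         * units, uc_pll_ref_div = 1, and a 31 kHz spread rate):
+         *
+         *   reference_divider = 1 + 1 = 2
+         *   clkS = 2700 * 5 / (2 * 31) = 217
+         *   clkV = 4 * percentage * fbdiv / (217 * 10000)
+         *
+         * CLKS sets the spread-spectrum modulation period and CLKV the
+         * feedback-divider swing, per the formulas quoted further down.
+         */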
+        pp_atomctrl_internal_ss_info ss_info;
+
+        uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+        if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+            /*
+             * ss_info.speed_spectrum_percentage -- in units of 0.01%
+             * ss_info.speed_spectrum_rate -- in units of kHz
+             */
+            /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+            uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+            /* clkv = 2 * D * fbdiv / NS */
+            uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+            cg_spll_spread_spectrum =
+                PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+            cg_spll_spread_spectrum =
+                PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+            cg_spll_spread_spectrum_2 =
+                PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+        }
+    }
+
+    sclk->SclkFrequency = engine_clock;
+    sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+    sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+    sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+    sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+    sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+    return 0;
+}
+
+static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
+        uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
+{
+    uint32_t i, temp;
+    uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ?
+            min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK;
+
+    PP_ASSERT_WITH_CODE((engine_clock >= min),
+        "Engine clock can't satisfy stutter requirement!", return 0);
+
+    for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
+        temp = engine_clock / (1 << i);
+
+        if (temp >= min || i == 0)
+            break;
+    }
+    return (uint8_t)i;
+}
+
+/**
+ * Populates a single SMC SCLK structure using the provided engine clock
+ *
+ * @param hwmgr        the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk_activity_level_threshold the activity threshold for this level
+ * @param graphic_level the SMC SCLK structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+        uint32_t engine_clock, uint16_t sclk_activity_level_threshold,
+        SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+    int result;
+    uint32_t threshold;
+    iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+    result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+    /* populate graphics levels */
+    result = iceland_get_dependecy_volt_by_clk(hwmgr,
+        hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc);
+    PP_ASSERT_WITH_CODE((0 == result),
+        "cannot find VDDC voltage value for VDDC engine clock dependency table", return result);
+
+    /* SCLK frequency in units of 10 kHz */
+    graphic_level->SclkFrequency = engine_clock;
+
+    /*
+     * Minimum VDDC phases required to support this level; it
+     * should be taken from the dependency table.
+     */
+    graphic_level->MinVddcPhases = 1;
+
+    if (data->vddc_phase_shed_control) {
+        iceland_populate_phase_value_based_on_sclk(hwmgr,
+                hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                engine_clock,
+                &graphic_level->MinVddcPhases);
+    }
+
+    /* Indicates maximum activity level for this performance level.
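+     * (the threshold is supplied through data->activity_target[] by the
+     * caller, iceland_populate_all_graphic_levels, and is byte-swapped
+     * for the big-endian SMC by CONVERT_FROM_HOST_TO_SMC_US below)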
50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 1; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 100; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + threshold = engine_clock * data->fast_watermark_threshold / 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + graphic_level->DeepSleepDivId = + iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock, + data->display_timing.min_clock_insr); + } + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t level_array_adress = data->dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS; + SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; + uint32_t i; + uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = iceland_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)data->activity_target[i], + &(data->smc_state_table.GraphicsLevel[i])); + if (0 != result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
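+     * (DeepSleepDivId encodes a power-of-two divider: in self-refresh the
+     * engine runs at engine_clock >> DeepSleepDivId, as computed by
+     * iceland_get_sleep_divider_id_from_clock() above, so clearing it here
+     * restricts deep sleep to the two lowest DPM levels.)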
 */
+        if (i > 1)
+            data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+    }
+
+    /* set highest level watermark to high */
+    if (dpm_table->sclk_table.count > 1)
+        data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+            PPSMC_DISPLAY_WATERMARK_HIGH;
+
+    data->smc_state_table.GraphicsDpmLevelCount =
+        (uint8_t)dpm_table->sclk_table.count;
+    data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+        iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+    while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+        (1 << (highest_pcie_level_enabled + 1))) != 0) {
+        highest_pcie_level_enabled++;
+    }
+
+    while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+        (1 << lowest_pcie_level_enabled)) == 0) {
+        lowest_pcie_level_enabled++;
+    }
+
+    while ((count < highest_pcie_level_enabled) &&
+        ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+        (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+        count++;
+    }
+
+    mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < highest_pcie_level_enabled ?
+        (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled;
+
+    /* set pcieDpmLevel to highest_pcie_level_enabled */
+    for (i = 2; i < dpm_table->sclk_table.count; i++) {
+        data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+    }
+
+    /* set pcieDpmLevel to lowest_pcie_level_enabled */
+    data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+    /* set pcieDpmLevel to mid_pcie_level_enabled */
+    data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+    /* the level count is sent to the SMC once, at SMC table init, and never changes */
+    result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
+
+    if (0 != result)
+        return result;
+
+    return 0;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+    iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+    struct iceland_dpm_table *dpm_table = &data->dpm_table;
+    int result;
+    /* populate MCLK dpm table to SMU7 */
+    uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+    uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+    SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
+    uint32_t i;
+
+    memset(levels, 0x00, level_array_size);
+
+    for (i = 0; i < dpm_table->mclk_table.count; i++) {
+        PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+            "cannot populate memory level as memory clock is zero", return -1);
+        result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+            &(data->smc_state_table.MemoryLevel[i]));
+        if (0 != result) {
+            return result;
+        }
+    }
+
+    /* Only enable level 0 for now. */
+    data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+    /*
+     * In order to prevent MC activity in stutter mode from pushing DPM up,
+     * the UVD change complements this by putting the MCLK in a higher state
+     * by default, such that we are not affected by the up threshold or MCLK DPM latency.
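+     * (The low 0x1F activity threshold written below keeps level 0 from
+     * being promoted by short bursts of MC activity;
+     * CONVERT_FROM_HOST_TO_SMC_US then swaps it into SMC byte order.)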
+     */
+    data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+    CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+    data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+    data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+    /* set highest level watermark to high */
+    data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+    /* the level count is sent to the SMC once, at SMC table init, and never changes */
+    result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
+        level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
+
+    if (0 != result) {
+        return result;
+    }
+
+    return 0;
+}
+
+struct ICELAND_DLL_SPEED_SETTING
+{
+    uint16_t Min;       /* Minimum Data Rate */
+    uint16_t Max;       /* Maximum Data Rate */
+    uint32_t dll_speed; /* The desired DLL_SPEED setting */
+};
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate)
+{
+    int result = 0;
+    iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+    uint32_t voltage_response_time, ulv_voltage;
+
+    pstate->CcPwrDynRm = 0;
+    pstate->CcPwrDynRm1 = 0;
+
+    /* backbiasResponseTime is used for the ULV state voltage value. */
+    result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+    PP_ASSERT_WITH_CODE((0 == result), "cannot get ULV voltage value", return result;);
+
+    if (!ulv_voltage) {
+        data->ulv.ulv_supported = false;
+        return 0;
+    }
+
+    if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) {
+        /* use the minimum voltage if the ULV voltage in the pptable is bigger than the minimum voltage */
+        if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
+            pstate->VddcOffset = 0;
+        } else {
+            /* Used in SMIO mode; not implemented for now. This is a backup only for CI. */
+            pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+        }
+    } else {
+        /* use the minimum voltage if the ULV voltage in the pptable is bigger than the minimum voltage */
+        if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
+            pstate->VddcOffsetVid = 0;
+        } else {
+            /* used in SVI2 mode */
+            pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+        }
+    }
+
+    /* used in SVI2 mode to shed phase */
+    pstate->VddcPhase = (data->vddc_phase_shed_control) ?
0 : 1; + + if (0 == result) { + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset); + } + + return result; +} + +static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv) +{ + return iceland_populate_ulv_level(hwmgr, ulv); +} + +static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int iceland_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *table = &(data->smc_state_table); + const struct phw_iceland_ulv_parm *ulv = &(data->ulv); + + result = iceland_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result;); + memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) { + iceland_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + } + + if (data->is_memory_GDDR5) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + } + + if (ulv->ulv_supported) { + result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); + } + + result = iceland_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = iceland_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = iceland_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = iceland_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result = 
iceland_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* + * Since only the initial state is completely set up at this + * point (the other states are just copies of the boot state) + * we only need to populate the ARB settings for the initial + * state. + */ + result = iceland_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = iceland_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = iceland_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot engine clock value in dependency table.\n"); + + result = iceland_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot memory clock value in dependency table.\n"); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) { + table->BootVddci = table->BootVddc; + } + else { + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + } + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + result = iceland_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate SVI2 setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * 
VOLTAGE_SCALE);
+    table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+    /* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.) */
+    result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
+            offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+            (uint8_t *)&(table->SystemFlags),
+            sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController),
+            data->sram_end);
+
+    PP_ASSERT_WITH_CODE(0 == result,
+        "Failed to upload dpm data to SMC memory!", return result);
+
+    /* Upload all ULV settings to SMC memory */
+    result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
+            data->ulv_settings_start,
+            (uint8_t *)&(data->ulv_setting),
+            sizeof(SMU71_Discrete_Ulv),
+            data->sram_end);
+
+#if 0
+    /* Notify SMC to follow new GPIO scheme */
+    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_AutomaticDCTransition)) {
+        if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme))
+            phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+    }
+#endif
+
+    return result;
+}
+
+int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+    const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+
+    uint32_t i, j;
+
+    for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) {
+        if (data->iceland_mc_reg_table.validflag & 1<<j) {
+            PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                "Index of mc_reg_table->address[] array out of boundary", return -1);
+            mc_reg_table->address[i].s0 =
+                PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0);
+            mc_reg_table->address[i].s1 =
+                PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1);
+            i++;
+        }
+    }
+
+    mc_reg_table->last = (uint8_t)i;
+
+    return 0;
+}
+
+/* convert register values from the driver to SMC format */
+void iceland_convert_mc_registers(
+    const phw_iceland_mc_reg_entry *pEntry,
+    SMU71_Discrete_MCRegisterSet *pData,
+    uint32_t numEntries, uint32_t validflag)
+{
+    uint32_t i, j;
+
+    for (i = 0, j = 0; j < numEntries; j++) {
+        if (validflag & 1<<j) {
+            pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
+            i++;
+        }
+    }
+}
+
+/* find the entry in the memory range table, then populate the value to the SMC's iceland_mc_reg_table */
+int iceland_convert_mc_reg_table_entry_to_smc(
+        struct pp_hwmgr *hwmgr,
+        const uint32_t memory_clock,
+        SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+        )
+{
+    const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+    uint32_t i = 0;
+
+    for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) {
+        if (memory_clock <=
+            data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+            break;
+        }
+    }
+
+    if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0))
+        --i;
+
+    iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i],
+        mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag);
+
+    return 0;
+}
+
+int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+        SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+    int result = 0;
+    iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+    int res;
+    uint32_t i;
+
+    for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+        res = iceland_convert_mc_reg_table_entry_to_smc(
+                hwmgr,
+                data->dpm_table.mclk_table.dpm_levels[i].value,
+                &mc_reg_table->data[i]
+                );
+
+        if (0 != res)
+            result = res;
+    }
+
+    return result;
+}
+
+int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+    int result;
+    struct iceland_hwmgr *data = (struct
iceland_hwmgr *)(hwmgr->backend); + + memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters)); + result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, + (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end); +} + +int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; +} + +int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); + + return 0; +} + +int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -1); + } + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -1); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x100005);/*Read */ + + udelay(10); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x500005);/* write */ + + } + + return 0; +} + +int iceland_start_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable general power management */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); + /* enable sclk deep sleep */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + +#if 0 + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -1); +#endif + + if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) { + PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM 
during DPM Start Function!", + return -1 + ); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt); + } + + return 0; +} + +static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr, + uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_start_smc(hwmgr->smumgr); + + return ret; +} + +static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + if (cf_iceland_voltage_control(hwmgr)) { + tmp_result = iceland_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable voltage control!", return tmp_result); + + tmp_result = iceland_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to contruct voltage tables!", return tmp_result); + } + + tmp_result = iceland_initialize_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize MC reg table!", return tmp_result); + + tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", return tmp_result); + + tmp_result = iceland_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", return tmp_result); + + tmp_result = iceland_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", return tmp_result); + + tmp_result = iceland_upload_firmware(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload firmware header!", return tmp_result); + + tmp_result = iceland_process_firmware_header(hwmgr); + 
PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", return tmp_result); + + tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", return tmp_result); + + tmp_result = iceland_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", return tmp_result); + + tmp_result = iceland_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate initialize MC Reg table!", return tmp_result); + + tmp_result = iceland_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate PM fuses!", return tmp_result); + + /* start SMC */ + tmp_result = iceland_tf_start_smc(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start SMC!", return tmp_result); + + /* enable SCLK control */ + tmp_result = iceland_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", return tmp_result); + + /* enable DPM */ + tmp_result = iceland_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", return tmp_result); + + tmp_result = iceland_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SMC CAC!", return tmp_result); + + tmp_result = iceland_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable power containment!", return tmp_result); + + tmp_result = iceland_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = iceland_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + return result; +} + +static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + return phm_hwmgr_backend_fini(hwmgr); +} + +static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phw_iceland_ulv_parm *ulv; + + ulv = &data->ulv; + ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT; + data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7; + + data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT; + data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicACTiming); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMemoryTransition); + + iceland_initialize_power_tune_defaults(hwmgr); + + data->mclk_strobe_mode_threshold = 40000; + data->mclk_stutter_mode_threshold = 30000; + data->mclk_edc_enable_threshold = 40000; + data->mclk_edc_wr_enable_threshold 
= 40000; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMCLS); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification); +} + +static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint16_t virtual_voltage_id; + uint16_t vddc = 0; + uint16_t i; + + /* the count indicates actual number of entries */ + data->vddc_leakage.count = 0; + data->vddci_leakage.count = 0; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + pr_err("Iceland should always support EVV\n"); + return -EINVAL; + } + + /* retrieve voltage for leakage ID (0xff01 + i) */ + for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)), + "Error retrieving EVV voltage value!\n", continue); + + if (vddc >= 2000) + pr_warning("Invalid VDDC value!\n"); + + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + } + + return 0; +} + +static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddc) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. + */ + if (leakage_table->leakage_id[leakage_index] == *vddc) { + /* + * Need to make sure vddc is less than 2v or + * else, it could burn the ASIC. + */ + if (leakage_table->actual_voltage[leakage_index] >= 2000) + pr_warning("Invalid VDDC value!\n"); + *vddc = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddci) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. 
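+             * (Leakage IDs start at ATOM_VIRTUAL_VOLTAGE_ID0, 0xff01; any
+             * value in that range which survives this loop is reported by
+             * the pr_warning below as an unpatched leakage ID.)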
+ */ + if (leakage_table->leakage_id[leakage_index] == *vddci) { + *vddci = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static int iceland_patch_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddci(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + + +static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, + struct phm_phase_shedding_limits_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage); + + return 0; +} + +static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *tab) +{ + if (tab) { + iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc); + iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci); + } + + return 0; +} + +static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) +{ + uint32_t i; + uint32_t vddc; + + if (tab) { + for (i = 0; i < tab->count; i++) { + vddc = (uint32_t)(tab->entries[i].Vddc); + iceland_patch_with_vddc_leakage(hwmgr, &vddc); + tab->entries[i].Vddc = (uint16_t)vddc; + } + } + + return 0; +} + +static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) +{ + int tmp; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_samu_vddc(hwmgr, 
hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+    if (tmp)
+        return -EINVAL;
+
+    tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+    if (tmp)
+        return -EINVAL;
+
+    tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+    if (tmp)
+        return -EINVAL;
+
+    tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+    if (tmp)
+        return -EINVAL;
+
+    tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+    if (tmp)
+        return -EINVAL;
+
+    tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+    if (tmp)
+        return -EINVAL;
+
+    return 0;
+}
+
+static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
+{
+    iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+    struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+    struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+    struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+    PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+        "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+    PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+        "VDDC dependency on SCLK table is empty. This table is mandatory\n", return -EINVAL);
+
+    PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+        "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+    PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+        "VDDC dependency on MCLK table is empty. This table is mandatory\n", return -EINVAL);
+
+    data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+    data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+    hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+        allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+    hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+        allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+    hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+        allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+    if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+        data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+        data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+    }
+
+    if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+        hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+    return 0;
+}
+
+static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
+{
+    uint32_t table_size;
+    struct phm_clock_voltage_dependency_table *table_clk_vlt;
+
+    hwmgr->dyn_state.mclk_sclk_ratio = 4;
+    hwmgr->dyn_state.sclk_mclk_delta = 15000;   /* 150 MHz */
+    hwmgr->dyn_state.vddc_vddci_delta = 200;    /* 200 mV */
+
+    /* initialize vddc_dep_on_dal_pwrl table */
+    table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
+    table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+
+    if (NULL == table_clk_vlt) {
+        pr_err("[ powerplay
] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); + return -ENOMEM; + } else { + table_clk_vlt->count = 4; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; + table_clk_vlt->entries[1].v = 720; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; + table_clk_vlt->entries[2].v = 810; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; + table_clk_vlt->entries[3].v = 900; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + } + + return 0; +} + +/** + * Initializes the Volcanic Islands Hardware Manager + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 1 if success; otherwise appropriate error code. + */ +static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + SMU71_Discrete_DpmTable *table = NULL; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + bool stay_in_boot; + struct phw_iceland_ulv_parm *ulv; + struct cgs_system_info sys_info = {0}; + + PP_ASSERT_WITH_CODE((NULL != hwmgr), + "Invalid Parameter!", return -EINVAL;); + + data->dll_defaule_on = 0; + data->sram_end = SMC_RAM_END; + + data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT; + + data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT; + + data->sclk_dpm_key_disabled = 0; + data->mclk_dpm_key_disabled = 0; + data->pcie_dpm_key_disabled = 0; + data->pcc_monitor_enabled = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + data->gpio_debug = 0; + data->engine_clock_data = 0; + data->memory_clock_data = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleepAboveLow); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + /* Initializes DPM default values. */ + iceland_initialize_dpm_defaults(hwmgr); + + /* Enable Platform EVV support. */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + + /* Get leakage voltage based on leakage ID. */ + result = iceland_get_evv_voltage(hwmgr); + if (result) + goto failed; + + /** + * Patch our voltage dependency table with actual leakage + * voltage. We need to perform leakage translation before it's + * used by other functions such as + * iceland_set_hwmgr_variables_based_on_pptable. + */ + result = iceland_patch_dependency_tables_with_leakage(hwmgr); + if (result) + goto failed; + + /* Parse pptable data read from VBIOS. 
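+     * (iceland_set_private_var_based_on_pptale caches the min/max VDDC and
+     * VDDCI seen in the dependency tables and derives the AC
+     * max_clock_voltage_on_ac limits from their last entries.)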
+     */
+    result = iceland_set_private_var_based_on_pptale(hwmgr);
+    if (result)
+        goto failed;
+
+    /* ULV support */
+    ulv = &(data->ulv);
+    ulv->ulv_supported = 1;
+
+    /* Initialize dynamic state adjustment rule settings */
+    result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+    if (result) {
+        pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n");
+        goto failed;
+    }
+
+    data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE;
+    data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE;
+    data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE;
+
+    /*
+     * Hardcode thermal temperature settings for now; these will
+     * be overwritten if a custom policy exists.
+     */
+    data->thermal_temp_setting.temperature_low = 99500;
+    data->thermal_temp_setting.temperature_high = 100000;
+    data->thermal_temp_setting.temperature_shutdown = 104000;
+    data->uvd_enabled = false;
+
+    table = &data->smc_state_table;
+
+    if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+            &gpio_pin_assignment)) {
+        table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_RegulatorHot);
+    } else {
+        table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN;
+        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_RegulatorHot);
+    }
+
+    if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+            &gpio_pin_assignment)) {
+        table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_AutomaticDCTransition);
+    } else {
+        table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN;
+        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+            PHM_PlatformCaps_AutomaticDCTransition);
+    }
+
+    /*
+     * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, the Peak
+     * Current Control feature is enabled and we should program the
+     * PCC HW register.
+     */
+    if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID,
+            &gpio_pin_assignment)) {
+        uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
+                CGS_IND_REG__SMC,
+                ixCNB_PWRMGT_CNTL);
+
+        switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+        case 0:
+            temp_reg = PHM_SET_FIELD(temp_reg,
+                CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+            break;
+        case 1:
+            temp_reg = PHM_SET_FIELD(temp_reg,
+                CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+            break;
+        case 2:
+            temp_reg = PHM_SET_FIELD(temp_reg,
+                CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+            break;
+        case 3:
+            temp_reg = PHM_SET_FIELD(temp_reg,
+                CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+            break;
+        case 4:
+            temp_reg = PHM_SET_FIELD(temp_reg,
+                CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+            break;
+        default:
+            pr_warning("[ powerplay ] Failed to setup PCC HW register!
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n"); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL, temp_reg); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMU7); + + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_SVID2)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_SVID2)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + data->vddc_phase_shed_control = false; + + stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StayInBootState); + + /* iceland doesn't support UVD and VCE */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + data->is_tlu_enabled = false; + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + ICELAND_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; + else + data->pcie_lane_cap 
= (uint32_t)sys_info.value; + } else { + /* Ignore return value in here, we are cleaning up a mess. */ + iceland_hwmgr_backend_fini(hwmgr); + } + + return 0; +failed: + return result; +} + +static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + result = pp_tables_get_num_of_entries(hwmgr, &ret); + + return result ? 0 : ret; +} + +static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct iceland_power_state *cast_phw_iceland_power_state( + struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct iceland_power_state *)hw_ps; +} + +static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) +{ + struct iceland_power_state *iceland_ps = + cast_phw_iceland_power_state(&prequest_ps->hardware); + + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < iceland_ps->performance_level_count; i++) { + if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk) + iceland_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk) + iceland_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; + iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) { + if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= 
hwmgr->platform_descriptor.overdriveLimit.engineClock),
+ "Overdrive sclk exceeds limit",
+ hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+ if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+ iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
+ }
+
+ if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+ "Overdrive mclk exceeds limit",
+ hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+ if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+ iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
+ }
+
+ disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+ disable_mclk_switching = (1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock;
+
+ sclk = iceland_ps->performance_levels[0].engine_clock;
+ mclk = iceland_ps->performance_levels[0].memory_clock;
+
+ if (disable_mclk_switching)
+ mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock;
+
+ if (sclk < minimum_clocks.engineClock)
+ sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
+
+ if (mclk < minimum_clocks.memoryClock)
+ mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
+
+ iceland_ps->performance_levels[0].engine_clock = sclk;
+ iceland_ps->performance_levels[0].memory_clock = mclk;
+
+ iceland_ps->performance_levels[1].engine_clock =
+ (iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ?
+ iceland_ps->performance_levels[1].engine_clock :
+ iceland_ps->performance_levels[0].engine_clock;
+
+ if (disable_mclk_switching) {
+ if (mclk < iceland_ps->performance_levels[1].memory_clock)
+ mclk = iceland_ps->performance_levels[1].memory_clock;
+
+ iceland_ps->performance_levels[0].memory_clock = mclk;
+ iceland_ps->performance_levels[1].memory_clock = mclk;
+ } else {
+ if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock)
+ iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+ for (i = 0; i < iceland_ps->performance_level_count; i++) {
+ iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+ iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+ iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+ iceland_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+ }
+ }
+
+ return 0;
+}
+
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ /*
+ * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
+ * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
+ * whereas voltage control is a fundamental change that will not be disabled
+ */
+ return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
+}
+
+/**
+ * force SCLK DPM state
+ *
+ * @param hwmgr: the address of the powerplay hardware manager. 
+ * @param n : DPM level
+ * @return The response that came from the SMC.
+ */
+int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+ iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+ /* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
+ PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+ "Trying to force SCLK when DPM is disabled", return -1;);
+ if (0 == data->sclk_dpm_key_disabled)
+ return (0 == smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_DPM_ForceState,
+ n) ? 0 : 1);
+
+ return 0;
+}
+
+/**
+ * force MCLK DPM state
+ *
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param n : DPM level
+ * @return The response that came from the SMC.
+ */
+int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+ iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+ /* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
+ PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+ "Trying to Force MCLK when DPM is disabled", return -1;);
+ if (0 == data->mclk_dpm_key_disabled)
+ return (0 == smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_ForceState,
+ n) ? 0 : 1);
+
+ return 0;
+}
+
+/**
+ * force PCIE DPM level
+ *
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param n : DPM level
+ * @return The response that came from the SMC.
+ */
+int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+ iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+ /* Checking if DPM is running. If we discover a hang because of this, we should skip this message.*/
+ PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
+ "Trying to Force PCIE level when DPM is disabled", return -1;);
+ if (0 == data->pcie_dpm_key_disabled)
+ return (0 == smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ n) ? 
0 : 1); + + return 0; +} + +static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + uint32_t level, tmp; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* SCLK */ + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force highest sclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, + SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level); + } + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* MCLK */ + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)), + "force highest mclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level); + } + } + } + + if (0 == data->pcie_dpm_key_disabled) { + /* PCIE */ + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)), + "force highest pcie dpm state failed!", return -1); + } + } + } + + return 0; +} + +static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr, + uint32_t level_mask) +{ + uint32_t level = 0; + + while (0 == (level_mask & (1 << level))) + level++; + + return level; +} + +static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + uint32_t level; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* for now force only sclk */ + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = iceland_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force sclk dpm state failed!", return -1); + + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, + CURR_SCLK_INDEX, + level); + } + + return 0; +} + +int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + +#if 0 + PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. 
Returning without sending SMC message.",
+ return -1);
+#endif
+
+ if (0 == data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
+ hwmgr->smumgr,
+ PPSMC_MSG_NoForcedLevel)),
+ "unforce sclk dpm state failed!",
+ return -1);
+ }
+
+ if (0 == data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
+ hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_NoForcedLevel)),
+ "unforce mclk dpm state failed!",
+ return -1);
+ }
+
+ if (0 == data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
+ hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_UnForceLevel)),
+ "unforce pcie level failed!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = iceland_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = iceland_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = iceland_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ hwmgr->dpm_level = level;
+ return ret;
+}
+
+const struct iceland_power_state *cast_const_phw_iceland_power_state(
+ const struct pp_hw_power_state *hw_ps)
+{
+ if (hw_ps == NULL)
+ return NULL;
+
+ PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (const struct iceland_power_state *)hw_ps;
+}
+
+static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
+ const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
+ struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+ struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
+ uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
+ struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
+ uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
+ struct PP_Clocks min_clocks = {0};
+ uint32_t i;
+ struct cgs_display_info info = {0};
+
+ data->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < psclk_table->count; i++) {
+ if (sclk == psclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= psclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ else {
+ /*
+ * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep
+ * divider update is required.
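+ * Note: min_clocks is zero-initialized above and not yet filled in, so
+ * this compare is effectively against 0 until the DAL query is wired up.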
+ */ + if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < pmclk_table->count; i++) { + if (mclk == pmclk_table->dpm_levels[i].value) + break; + } + + if (i >= pmclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps) +{ + uint32_t i; + uint32_t pcie_speed, max_speed = 0; + + for (i = 0; i < hw_ps->performance_level_count; i++) { + pcie_speed = hw_ps->performance_levels[i].pcie_gen; + if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + + return max_speed; +} + +static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speed_cntl = 0; + + speed_cntl = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speed_cntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + + +static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state); + const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state); + + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch(target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); 
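+ /* Tell the SMC to hold the current MCLK DPM level while the new tables are uploaded. */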
+ PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + return 0; +} + +static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; + uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; + struct iceland_dpm_table *pdpm_table = &data->dpm_table; + + struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* + * Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { + clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value + + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { + clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value - + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { + clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value + + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value - + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct iceland_single_dpm_table *pdpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < pdpm_table->count; i++) { + if ((pdpm_table->dpm_levels[i].value < low_limit) || + (pdpm_table->dpm_levels[i].value > high_limit)) + pdpm_table->dpm_levels[i].enabled = false; + else + pdpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state) +{ + int result = 0; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table), + hw_state->performance_levels[0].engine_clock, + hw_state->performance_levels[high_limit_count].engine_clock); + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table), + hw_state->performance_levels[0].memory_clock, + hw_state->performance_levels[high_limit_count].memory_clock); + + return result; +} + +static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + + result = iceland_trim_dpm_states(hwmgr, iceland_ps); + if (0 != result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)) + data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + + data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + return 0; +} + +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + data->sram_end + ); + } + + return result; +} + +static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters)); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); + + if(result != 0) + return result; + + + address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&data->mc_reg_table.data[0], + sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + data->sram_end); +} + +static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return iceland_program_memory_timing_parameters(hwmgr); 
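+ /* No OD clock change pending, so the programmed memory timings remain valid. */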
+ + return 0; +} + +static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps); + uint8_t request; + + if (data->pspp_notify_required || + data->pcie_performance_request) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) { + data->pcie_performance_request = false; + return 0; + } + + if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + data->pcie_performance_request = false; + return 0; +} + +int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + PPSMC_Result result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Sclk Dpm enable Mask failed", return -1); + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Mclk Dpm enable Mask failed", return -1); + } + } + + return 0; +} + +static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + + tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); + } + + tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + + tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); + + tmp_result = iceland_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); + + tmp_result = iceland_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); + + tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); + + tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); + + tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result); + } + + return result; +} + +static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct iceland_power_state); +} + +static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].memory_clock; + else + return 
iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; +} + +static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].engine_clock; + else + return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; +} + +static int iceland_get_current_pcie_lane_number( + struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, + LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. */ + data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr); + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)iceland_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *power_state, + unsigned int index, const void *clock_info) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state); + const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; + struct iceland_performance_level *performance_level; + uint32_t engine_clock, memory_clock; + uint16_t pcie_gen_from_bios; + + engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; + memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; + + if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) + 
data->highest_mclk = memory_clock; + + performance_level = &(iceland_power_state->performance_levels + [iceland_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = memory_clock; + performance_level->engine_clock = engine_clock; + + pcie_gen_from_bios = visland_clk_info->ucPCIEGen; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); + + return 0; +} + +static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct iceland_power_state *ps; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *dep_mclk_table = + hwmgr->dyn_state.vddci_dependency_on_mclk; + + memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct iceland_power_state *)(&state->hardware); + + result = pp_tables_get_entry(hwmgr, entry_index, state, + iceland_get_pp_table_entry_callback_func); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state as + * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot + * state if there is only one VDDCI/MCLK level, check if it's + * the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].v != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) { + if (data->bacos.best_match == 0xffff) { + /* For C.I. 
use boot state as base BACO state */ + data->bacos.best_match = PP_StateClassificationFlag_Boot; + data->bacos.performance_level = ps->performance_levels[0]; + } + } + + + ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK; + ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static void +iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); + + offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); + + seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); + + seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); +} + +int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ + iceland_notify_smc_display_change(hwmgr, false); + else + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int iceland_program_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if(0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us)); + + if (num_active_displays == 1) + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + iceland_program_display_gap(hwmgr); + + return 0; +} + +/** +* Set maximum target operating fan output PWM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanPwm: max operating fan PWM in percents +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); +} + +/** +* Set maximum target operating fan output RPM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanRpm: max operating fan RPM value. +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 
0 : -1); +} + +static int iceland_dpm_set_interrupt_state(void *private_data, + unsigned src_id, unsigned type, + int enabled) +{ + uint32_t cg_thermal_int; + struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + switch (type) { + case AMD_THERMAL_IRQ_LOW_TO_HIGH: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + + case AMD_THERMAL_IRQ_HIGH_TO_LOW: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + default: + break; + } + return 0; +} + +static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + int result; + const struct pp_interrupt_registration_info *info = + (const struct pp_interrupt_registration_info *)thermal_interrupt_info; + + if (info == NULL) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + return 0; +} + + +static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0,0,NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; +/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL + if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cgs_get_min_clock_settings(hwmgr->device, &min_clocks); + if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) + is_update_required = true; +*/ + return is_update_required; +} + + +static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1, + const struct iceland_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1); + const struct iceland_power_state *psb = 
cast_const_phw_iceland_power_state(pstate2); + int i; + + if (equal == NULL || psa == NULL || psb == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); + *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + *equal &= (psa->acp_clk == psb->acp_clk); + + return 0; +} + +static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static int iceland_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + case PP_PCIE: + { + uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + 
continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = iceland_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock; + uint32_t tc; + uint32_t divide; + + ATOM_FIRMWARE_INFO *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tc) + return TCLK; + + fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + if 
(!fw_info) + return 0; + + reference_clock = le16_to_cpu(fw_info->usReferenceClock); + + divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != divide) + return reference_clock / 4; + + return reference_clock; +} + +static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + +static const struct pp_hwmgr_func iceland_hwmgr_funcs = { + .backend_init = &iceland_hwmgr_backend_init, + .backend_fini = &iceland_hwmgr_backend_fini, + .asic_setup = &iceland_setup_asic_task, + .dynamic_state_management_enable = &iceland_enable_dpm_tasks, + .apply_state_adjust_rules = iceland_apply_state_adjust_rules, + .force_dpm_level = &iceland_force_dpm_level, + .power_state_set = iceland_set_power_state_tasks, + .get_power_state_size = iceland_get_power_state_size, + .get_mclk = iceland_dpm_get_mclk, + .get_sclk = iceland_dpm_get_sclk, + .patch_boot_state = iceland_dpm_patch_boot_state, + .get_pp_table_entry = iceland_get_pp_table_entry, + .get_num_of_pp_table_entries = iceland_get_num_of_entries, + .print_current_perforce_level = iceland_print_current_perforce_level, + .powerdown_uvd = iceland_phm_powerdown_uvd, + .powergate_uvd = iceland_phm_powergate_uvd, + .powergate_vce = iceland_phm_powergate_vce, + .disable_clock_power_gating = iceland_phm_disable_clock_power_gating, + .update_clock_gatings = iceland_phm_update_clock_gatings, + .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = iceland_display_configuration_changed_task, + .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output, + .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output, + .get_temperature = iceland_thermal_get_temperature, + .stop_thermal_controller = iceland_thermal_stop_thermal_controller, + .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration, + .check_states_equal = iceland_check_states_equal, + .set_fan_control_mode = iceland_set_fan_control_mode, + .get_fan_control_mode = iceland_get_fan_control_mode, + .force_clock_level = iceland_force_clock_level, + .print_clock_levels = iceland_print_clock_levels, + .get_sclk_od = iceland_get_sclk_od, + .set_sclk_od = iceland_set_sclk_od, + .get_mclk_od = 
iceland_get_mclk_od, + .set_mclk_od = iceland_set_mclk_od, +}; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data; + + data = kzalloc (sizeof(iceland_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + memset(data, 0x00, sizeof(iceland_hwmgr)); + + hwmgr->backend = data; + hwmgr->hwmgr_func = &iceland_hwmgr_funcs; + hwmgr->pptable_func = &pptable_funcs; + + /* thermal */ + pp_iceland_thermal_initialize(hwmgr); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h new file mode 100644 index 000000000000..f253988de2d2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h @@ -0,0 +1,424 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui + * + */ +#ifndef ICELAND_HWMGR_H +#define ICELAND_HWMGR_H + +#include "hwmgr.h" +#include "ppatomctrl.h" +#include "ppinterrupt.h" +#include "ppsmc.h" +#include "iceland_powertune.h" +#include "pp_endian.h" +#include "smu71_discrete.h" + +#define ICELAND_MAX_HARDWARE_POWERLEVELS 2 +#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 + +struct iceland_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct _phw_iceland_bacos { + uint32_t best_match; + uint32_t baco_flags; + struct iceland_performance_level performance_level; +}; +typedef struct _phw_iceland_bacos phw_iceland_bacos; + +struct _phw_iceland_uvd_clocks { + uint32_t VCLK; + uint32_t DCLK; +}; + +typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks; + +struct _phw_iceland_vce_clocks { + uint32_t EVCLK; + uint32_t ECCLK; +}; + +typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks; + +struct iceland_power_state { + uint32_t magic; + phw_iceland_uvd_clocks uvd_clocks; + phw_iceland_vce_clocks vce_clocks; + uint32_t sam_clk; + uint32_t acp_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS]; +}; + +struct _phw_iceland_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; +typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level; + +#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define ICELAND_MINIMUM_ENGINE_CLOCK 5000 + +struct iceland_single_dpm_table { + uint32_t count; + phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct iceland_dpm_table { + struct iceland_single_dpm_table sclk_table; + struct iceland_single_dpm_table mclk_table; + struct iceland_single_dpm_table pcie_speed_table; + struct iceland_single_dpm_table vddc_table; + struct iceland_single_dpm_table vdd_gfx_table; + struct iceland_single_dpm_table vdd_ci_table; + struct iceland_single_dpm_table mvdd_table; +}; +typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table; + + +struct _phw_iceland_clock_regisiters { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; +typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers; + +struct _phw_iceland_voltage_smio_registers { + uint32_t vs0_vid_lower_smio_cntl; +}; +typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers; + + +struct _phw_iceland_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry; + +struct _phw_iceland_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
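+				One bit is used per mc_reg_address[] entry: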
bit0->address[0], bit1->address[1], etc.*/ + phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table; + +#define DISABLE_MC_LOADMICROCODE 1 +#define DISABLE_MC_CFGPROGRAMMING 2 + + +/*Ultra Low Voltage parameter structure */ +struct phw_iceland_ulv_parm{ + bool ulv_supported; + uint32_t ch_ulv_parameter; + uint32_t ulv_volt_change_delay; + struct iceland_performance_level ulv_power_level; +}; + +#define ICELAND_MAX_LEAKAGE_COUNT 8 + +struct phw_iceland_leakage_voltage { + uint16_t count; + uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT]; +}; + +struct _phw_iceland_display_timing { + uint32_t min_clock_insr; + uint32_t num_existing_displays; +}; +typedef struct _phw_iceland_display_timing phw_iceland_display_timing; + + +struct phw_iceland_thermal_temperature_setting +{ + long temperature_low; + long temperature_high; + long temperature_shutdown; +}; + +struct _phw_iceland_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; +typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask; + +struct _phw_iceland_pcie_perf_range { + uint16_t max; + uint16_t min; +}; +typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range; + +struct _phw_iceland_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; +typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +/* We need to review which fields are needed. */ +/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
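+   Fields are grouped by the section comments inside the struct: DPM
+   tables, interrupt registration, SMC SRAM offsets, CAC and DPM2 power
+   containment state, and power gating flags.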
*/ +struct iceland_hwmgr { + struct iceland_dpm_table dpm_table; + struct iceland_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vdd_gfx_control; + + uint32_t vddc_vddci_delta; + uint32_t vddc_vddgfx_delta; + + struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; + struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; + struct pp_interrupt_registration_info smc_to_host_interrupt_info; + uint32_t active_auto_throttle_sources; + + struct pp_interrupt_registration_info external_throttle_interrupt; + irq_handler_func_t external_throttle_callback; + void *external_throttle_context; + + struct pp_interrupt_registration_info ctf_interrupt_info; + irq_handler_func_t ctf_callback; + void *ctf_context; + + phw_iceland_clock_registers clock_registers; + phw_iceland_voltage_smio_registers voltage_smio_registers; + + bool is_memory_GDDR5; + uint16_t acpi_vddc; + bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ + uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ + uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ + uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ + uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ + uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ + struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ + struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ + struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ + uint16_t min_vddc_in_pp_table; + uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ + uint16_t min_vddci_in_pp_table; + uint32_t mclk_strobe_mode_threshold; + uint32_t mclk_stutter_mode_threshold; + uint32_t mclk_edc_enable_threshold; + uint32_t mclk_edc_wr_enable_threshold; + bool is_uvd_enabled; + bool is_xdma_enabled; + phw_iceland_vbios_boot_state vbios_boot_state; + + bool battery_state; + bool is_tlu_enabled; + bool pcie_performance_request; + + /* -------------- SMC SRAM Address of firmware header tables ----------------*/ + uint32_t sram_end; /* The first address after the SMC SRAM. */ + uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ + uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ + uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ + uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ + uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ + uint32_t ulv_settings_start; + SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ + SMU71_Discrete_MCRegisters mc_reg_table; + SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ + + /* -------------- Stuff originally coming from Evergreen --------------------*/ + phw_iceland_mc_reg_table iceland_mc_reg_table; + uint32_t vdd_ci_control; + pp_atomctrl_voltage_table vddc_voltage_table; + pp_atomctrl_voltage_table vddci_voltage_table; + pp_atomctrl_voltage_table vddgfx_voltage_table; + pp_atomctrl_voltage_table mvdd_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vdd_ci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_defaule_on; + bool performance_request_registered; + + /* ----------------- Low Power Features ---------------------*/ + phw_iceland_bacos bacos; + struct phw_iceland_ulv_parm ulv; + + /* ----------------- CAC Stuff ---------------------*/ + uint32_t cac_table_start; + bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ + bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ + bool cac_enabled; + + /* ----------------- DPM2 Parameters ---------------------*/ + uint32_t power_containment_features; + bool enable_bapm_feature; + bool enable_dte_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + struct iceland_pt_defaults *power_tune_defaults; + SMU71_Discrete_PmFuses power_tune_table; + uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + /* ----------------- Phase Shedding ---------------------*/ + bool vddc_phase_shed_control; + + /* --------------------- DI/DT --------------------------*/ + phw_iceland_display_timing display_timing; + + /* --------- ReadRegistry data for memory and engine clock margins ---- */ + uint32_t engine_clock_data; + uint32_t memory_clock_data; + + /* -------- Thermal Temperature Setting --------------*/ + struct phw_iceland_thermal_temperature_setting thermal_temp_setting; + phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask; + + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + /* used to store the previous dal min sclock */ + uint32_t min_engine_clocks; + phw_iceland_pcie_perf_range pcie_gen_performance; + phw_iceland_pcie_perf_range pcie_lane_performance; + phw_iceland_pcie_perf_range pcie_gen_power_saving; + phw_iceland_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + /* percentage value from 0-100, default 50 */ + uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; + uint32_t mclk_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + uint32_t pcc_monitor_enabled; + + /* --------- Power Gating States ------------*/ + bool uvd_power_gated; /* 1: gated, 0:not gated */ + bool vce_power_gated; /* 1: gated, 0:not gated */ + bool samu_power_gated; /* 1: gated, 0:not gated */ + bool acp_power_gated; /* 1: gated, 0:not gated */ + bool pg_acp_init; + + /* soft pptable for re-uploading into smu */ + void *soft_pp_table; +}; + +typedef struct iceland_hwmgr iceland_hwmgr; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr); +int 
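+/*
+ * Editor's note: these two prototypes, iceland_populate_bapm_vddc_vid_sidd()
+ * and iceland_populate_vddc_vid(), are consumed by
+ * iceland_populate_pm_fuses() in iceland_powertune.c to fill PM fuse
+ * table words DW0-DW3 (BAPM VDDC VID/SIDD) and DW4-DW5 (VDDC VID).
+ */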
iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr); +int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr); + +#define ICELAND_DPM2_NEAR_TDP_DEC 10 +#define ICELAND_DPM2_ABOVE_SAFE_INC 5 +#define ICELAND_DPM2_BELOW_SAFE_INC 20 + +/* + * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size + * is 128, then this value should be Log2(128) = 7. + */ +#define ICELAND_DPM2_LTA_WINDOW_SIZE 7 + +#define ICELAND_DPM2_LTS_TRUNCATE 0 + +#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100 + +#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF +#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF + +#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define ICELAND_VOLTAGE_CONTROL_NONE 0x0 +#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2 + +/* convert to Q8.8 format for firmware */ +#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256 + +#define ICELAND_UNUSED_GPIO_PIN 0x7F + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c new file mode 100644 index 000000000000..d10cd9f7c2df --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c @@ -0,0 +1,491 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui + * + */ + +#include "amdgpu.h" +#include "hwmgr.h" +#include "smumgr.h" +#include "iceland_hwmgr.h" +#include "iceland_powertune.h" +#include "iceland_smumgr.h" +#include "smu71_discrete.h" +#include "smu71.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "pp_endian.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 +#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 +#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 +#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 + + +struct iceland_pt_defaults defaults_iceland = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, + * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +/* 35W - XT, XTL */ +struct iceland_pt_defaults defaults_icelandxt = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +/* 25W - PRO, LE */ +struct iceland_pt_defaults defaults_icelandpro = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t tmp = 0; + struct cgs_system_info sys_info = {0}; + uint32_t pdev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + pdev_id = (uint32_t)sys_info.value; + + switch (pdev_id) { + case DEVICE_ID_VI_ICELAND_M_6900: + case DEVICE_ID_VI_ICELAND_M_6903: + data->power_tune_defaults = &defaults_icelandxt; + break; + + case DEVICE_ID_VI_ICELAND_M_6901: + case DEVICE_ID_VI_ICELAND_M_6902: + data->power_tune_defaults = &defaults_icelandpro; + break; + default: + /* TODO: need to assign valid defaults */ + data->power_tune_defaults = &defaults_iceland; + pr_warning("Unknown V.I. 
Device ID.\n"); + break; + } + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + data->ul_dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + data->fast_watermark_threshold = 100; + + if (hwmgr->powercontainment_enabled) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + tmp = 1; + data->enable_dte_feature = tmp ? false : true; + data->enable_tdc_limit_feature = tmp ? true : false; + data->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } + } +} + +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_pt_defaults *defaults = data->power_tune_defaults; + SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + uint16_t *def1, *def2; + int i, j, k; + + /* + * TDP number of fraction bits are changed from 8 to 7 for Iceland + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset; + + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + /* The following are for new Iceland Multi-input fan/thermal control */ + if(NULL != ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU71_DTE_SOURCES; j++) { + for (k = 0; k < SMU71_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int iceland_populate_tdc_limit(struct pp_hwmgr 
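+/*
+ * Editor's note: like the TDP fields above, the TDC limit is sent to
+ * the SMC as a fixed-point value (usTDC scaled by 256; per the comment
+ * in the body, the SMC consumes 7 fraction bits) and is byte swapped
+ * with CONVERT_FROM_HOST_TO_SMC_US() before landing in the PM fuse
+ * table.
+ */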
*hwmgr) +{ + uint16_t tdc_limit; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Iceland as requested by SMC team + */ + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 8; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW0 - DW3 */ + if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate bapm vddc vid Failed!", + return -EINVAL); + + /* DW4 - DW5 */ + if (iceland_populate_vddc_vid(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate vddc vid Failed!", + return -EINVAL); + + /* DW6 */ + if (iceland_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (iceland_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit 
Failed!", return -EINVAL); + /* DW8 */ + if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != iceland_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW16 */ + if (iceland_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW17 */ + if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW18 */ + if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU71_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (0 == smc_result) ? true : false; + } + return result; +} + +static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if(data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table; + int smc_result; + int result = 0; + uint32_t is_asic_kicker; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2); + is_asic_kicker = (is_asic_kicker >> 12) & 0x01; + + if (data->enable_bapm_feature && + (!is_asic_kicker || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable BAPM in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; + } + + if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc)) + dpm_table->DTEMode = 2; + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + 
(uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (0 == smc_result) { + struct phm_cac_tdp_table *cac_table = + hwmgr->dyn_state.cac_dtp_table; + uint32_t default_limit = + (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (iceland_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* + * SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h new file mode 100644 index 000000000000..6c25ee139ca3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h @@ -0,0 +1,74 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui + * + */ +#ifndef ICELAND_POWERTUNE_H +#define ICELAND_POWERTUNE_H + +#include "smu71.h" + +enum iceland_pt_config_reg_type { + ICELAND_CONFIGREG_MMR = 0, + ICELAND_CONFIGREG_SMC_IND, + ICELAND_CONFIGREG_DIDT_IND, + ICELAND_CONFIGREG_CACHE, + ICELAND_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 + +struct iceland_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum iceland_pt_config_reg_type type; +}; + +struct iceland_pt_defaults +{ + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddc; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; + uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr); +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr); +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* ICELAND_POWERTUNE_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c new file mode 100644 index 000000000000..527f37022424 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c @@ -0,0 +1,595 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui + * + */ +#include +#include "iceland_thermal.h" +#include "iceland_hwmgr.h" +#include "iceland_smumgr.h" +#include "atombios.h" +#include "ppsmc.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + + +/** +* Get Fan Speed Control Parameters. +* @param hwmgr the address of the powerplay hardware manager. +* @param pSpeed is the address of the structure where the result is to be placed. 
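+* Note: percent control is always reported as supported; RPM control is
+* reported only when ucTachometerPulsesPerRevolution is non-zero, with
+* min/max RPM taken from the thermal controller's fan info.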
+* @exception Always succeeds except if we cannot zero out the output structure. +*/ +int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +/** +* Get Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param pSpeed is the address of the structure where the result is to be placed. +* @exception Fails is the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); + + if (0 == duty100) + return -EINVAL; + + + tmp64 = (uint64_t)duty * 100; + do_div(tmp64, duty100); + *speed = (uint32_t)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +/** +* Get Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the address of the structure where the result is to be placed. +* @exception Returns not supported if no fan is found or if pulses per revolution are not set +*/ +int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + return 0; +} + +/** +* Set Fan Speed Control to static mode, so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* mode the fan control mode, 0 default, 1 by percent, 5, by RPM +* @exception Should always succeed. +*/ +int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed. 
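+* Note: restores the FDO_PWM_MODE and TMIN values that
+* iceland_fan_ctrl_set_static_mode() saved before switching the fan to
+* static control.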
+*/ +static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->fan_ctrl_is_in_default_mode) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; +} + + +int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails is the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -EINVAL; + + if (speed > 100) { + pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n"); + speed = 100; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); + + return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + if (0 == result) + result = iceland_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (min - max) to be set. +* @exception Fails is the speed not lie between min and max. +*/ +int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + return 0; +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); + + /* + * Bit 9 means the reading is lower than the lowest usable + * value. + */ + if (0 != (0x200 & temp)) + temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING; + else + temp = (temp & 0x1ff); + + temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. 
+* @param range Temperature range to be programmed for high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = iceland_thermal_disable_alert(hwmgr); + + if (result) + pr_warning("Failed to disable thermal alerts!\n"); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
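+* Note: builds a three-point (TMin/TMed/THigh) fan curve; the two slopes
+* are derived from the PWM and temperature deltas, and the finished
+* table is copied into SMC SRAM at data->fan_table_start.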
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = iceland_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + //fan_table.FanControl_GL_Flag = 1; + + res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + + if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); + + if (0 != res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); +*/ + return 0; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + iceland_fan_ctrl_start_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + return iceland_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return iceland_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. 
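+* Note: used by the set_temperature_range master table below, which
+* disables alerts, reprograms the range, then re-enables alerts,
+* presumably so no alert fires while the range is being updated.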
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + return iceland_thermal_disable_alert(hwmgr); +} + +static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = { + { NULL, tf_iceland_thermal_initialize }, + { NULL, tf_iceland_thermal_set_temperature_range }, + { NULL, tf_iceland_thermal_enable_alert }, + /* + * We should restrict performance levels to low before we halt + * the SMC. On the other hand we are still in boot state when + * we do this so it would be pointless. If this assumption + * changes we have to revisit this table. + */ + { NULL, tf_iceland_thermal_setup_fan_table}, + { NULL, tf_iceland_thermal_start_smc_fan_control}, + { NULL, NULL } +}; + +static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + iceland_thermal_start_thermal_controller_master_list +}; + +static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = { + { NULL, tf_iceland_thermal_disable_alert}, + { NULL, tf_iceland_thermal_set_temperature_range}, + { NULL, tf_iceland_thermal_enable_alert}, + { NULL, NULL } +}; + +static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + iceland_thermal_set_temperature_range_master_list +}; + +int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + iceland_fan_ctrl_set_default_mode(hwmgr); + return 0; +} + +/** +* Initializes the thermal controller related functions in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. +*/ +int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); + + if (0 == result) { + result = phm_construct_table(hwmgr, + &iceland_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (0 != result) + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + } + + if (0 == result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h new file mode 100644 index 000000000000..267945f4df71 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui
+ *
+ */

+#ifndef ICELAND_THERMAL_H
+#define ICELAND_THERMAL_H

+#include "hwmgr.h"

+#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1
+#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2

+#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256
+#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255

+#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0
+#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255

+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5


+extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);

+#endif

From 54c825a9166f0c13aa4044500910633105a7e6a7 Mon Sep 17 00:00:00 2001
From: Huang Rui
Date: Mon, 20 Jun 2016 13:35:28 +0800
Subject: [PATCH 023/171] drm/amdgpu: enable iceland powerplay manually

Iceland powerplay can now be enabled manually via the module parameter.
The default state is disabled.

Signed-off-by: Huang Rui
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index c5738a22b690..260da022beef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -112,13 +112,15 @@ static int amdgpu_pp_early_init(void *handle)
 	case CHIP_STONEY:
 		adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
 		break;
+	case CHIP_TOPAZ:
+		adev->pp_enabled = (amdgpu_powerplay > 0) ?
true : false;
+		break;
 	/* These chips don't have powerplay implemenations */
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 	case CHIP_KAVERI:
-	case CHIP_TOPAZ:
 	default:
 		adev->pp_enabled = false;
 		break;

From 9f7aab6fd396dcb7dacee152784ddf9c2f9b0e8b Mon Sep 17 00:00:00 2001
From: Huang Rui
Date: Tue, 26 Jul 2016 16:42:11 +0800
Subject: [PATCH 024/171] drm/amd/powerplay: fix the incorrect checking condition

Signed-off-by: Huang Rui
Reviewed-by: Ken Wang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 9c6d7e3d5752..7ffbbef60639 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -4170,7 +4170,7 @@ int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 				return -1);
 	}

-	if (0 == data->pcie_dpm_key_disabled) {
+	if (0 == data->mclk_dpm_key_disabled) {
 		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
 				hwmgr->smumgr,
 				PPSMC_MSG_MCLKDPM_NoForcedLevel)),

From e4e2c8f29a343953f0ff31bd2775b73cd8c07a4c Mon Sep 17 00:00:00 2001
From: Huang Rui
Date: Tue, 26 Jul 2016 15:57:07 +0800
Subject: [PATCH 025/171] drm/amd/powerplay: add deep sleep initialization

This patch adds the deep sleep initialization at DPM. It needs to send
a message to the SMC to enable this feature before enabling the voltage
controller.

Signed-off-by: Huang Rui
Reviewed-by: Ken Wang
Signed-off-by: Alex Deucher
---
 .../drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 7ffbbef60639..7a9749f48bca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -3069,6 +3069,36 @@ static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr)
 	return ret;
 }

+/**
+* Programs the Deep Sleep registers
+*
+* @param    pHwMgr  the address of the powerplay hardware manager.
+* @param pInput the pointer to input data (PhwEvergreen_DisplayConfiguration) +* @param pOutput the pointer to output data (unused) +* @param pStorage the pointer to temporary storage (unused) +* @param Result the last failure code (unused) +* @return always 0 +*/ +static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_ON) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -EINVAL); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + + return 0; +} + static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -3133,6 +3163,10 @@ static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable SCLK control!", return tmp_result); + tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable deep sleep!", return tmp_result); + /* enable DPM */ tmp_result = iceland_start_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), From f6bf6dcaab35ba6de4a41de05271bf567ea4cadd Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 26 Jul 2016 16:25:57 +0800 Subject: [PATCH 026/171] drm/amd/powerplay: set the platform capability flags for iceland Signed-off-by: Huang Rui Reviewed-by: Ken Wang Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index 7a9749f48bca..6075050b891f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -3801,6 +3801,57 @@ static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StayInBootState); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ActivityReporting); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPCIEGen2Support); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisablePowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalAutoThrottling); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableLSClockGating); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SamuDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AcpDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + 
PHM_PlatformCaps_OD6inACSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnablePlatformPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PauseMMSessions); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PauseMMSessions); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingManagedInCAIL); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_IcelandULPSSWWorkAround); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + /* iceland doesn't support UVD and VCE */ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); From db7992d8205cb5fce806dac5d818e75669b35f11 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 26 Jul 2016 16:33:41 +0800 Subject: [PATCH 027/171] drm/amd/powerplay: add enabling voltage controller back Signed-off-by: Huang Rui Reviewed-by: Ken Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index 6075050b891f..d538d28fa8b3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -2970,13 +2970,11 @@ int iceland_start_dpm(struct pp_hwmgr *hwmgr) PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); -#if 0 PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable)), "Failed to enable voltage DPM during DPM Start Function!", return -1); -#endif if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) { PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); From 3c18266cffc9741e1982005034a0a3494acbdf86 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 26 Jul 2016 16:38:36 +0800 Subject: [PATCH 028/171] drm/amd/powerplay: add DPM running checking back This patch adds DPM running checking back, because the DPM issue is fixed. Signed-off-by: Huang Rui Reviewed-by: Ken Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index d538d28fa8b3..d8ca59b283d3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -4239,11 +4239,9 @@ int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); -#if 0 PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr), "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", return -1); -#endif if (0 == data->sclk_dpm_key_disabled) { PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( From 70bb246154229550e5c9095d484b39fb82047907 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 28 Jul 2016 13:35:42 -0400 Subject: [PATCH 029/171] drm/amdgpu/powerplay: enable powerplay by default on TOPAZ Now that the implementation is complete. 
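For reference, the net effect in amdgpu_pp_early_init() after this hunk
is applied (a sketch assembled from this and the earlier TOPAZ patch,
not the diff itself): amdgpu_powerplay defaults to -1, so the dGPUs
with a complete implementation are now on unless explicitly disabled,
while the APUs still require an explicit opt-in (assuming the parameter
keeps its usual name, amdgpu.powerplay):

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_TOPAZ:
		/* default on; amdgpu.powerplay=0 turns it off */
		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* default off; amdgpu.powerplay=1 turns it on */
		adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
		break;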
Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 260da022beef..57aa3425fd5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -106,15 +106,13 @@ static int amdgpu_pp_early_init(void *handle) break; case CHIP_TONGA: case CHIP_FIJI: + case CHIP_TOPAZ: adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; break; case CHIP_CARRIZO: case CHIP_STONEY: adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; break; - case CHIP_TOPAZ: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; - break; /* These chips don't have powerplay implemenations */ case CHIP_BONAIRE: case CHIP_HAWAII: From e95a14a9094a8e6396fba22cbdea9201824d6fd1 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:40:07 -0400 Subject: [PATCH 030/171] drm/amd/amdgpu: add mutex locking for both DPM and PP based powergating for UVD/VCE This adds a mutex lock for both DPM/PP around the changes in power gating state so that userspace can poll registers without a race condition on power state. Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 58 +++++++++++--------------- 1 file changed, 25 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ff63b88b0ffa..411965f12c01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1106,54 +1106,46 @@ force: void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { + /* enable/disable UVD */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_uvd(adev, !enable); - else { - if (adev->pm.funcs->powergate_uvd) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - /* enable/disable UVD */ - amdgpu_dpm_powergate_uvd(adev, !enable); + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = false; + mutex_unlock(&adev->pm.mutex); } - + amdgpu_pm_compute_clocks(adev); } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { + /* enable/disable VCE */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_vce(adev, !enable); - else { - if (adev->pm.funcs->powergate_vce) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - amdgpu_dpm_powergate_vce(adev, !enable); + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = 
AMDGPU_VCE_LEVEL_AC_ALL;
-				mutex_unlock(&adev->pm.mutex);
-			} else {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.vce_active = false;
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_pm_compute_clocks(adev);
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.vce_active = false;
+			mutex_unlock(&adev->pm.mutex);
 		}
+		amdgpu_pm_compute_clocks(adev);
 	}
 }

From bd12267dd896ff8e3dfd25ac31cbc255e9184715 Mon Sep 17 00:00:00 2001
From: Tom St Denis
Date: Thu, 28 Jul 2016 09:39:22 -0400
Subject: [PATCH 031/171] drm/amd/amdgpu: add pm lock to debugfs mmio entry

Adds support for PM locks around access to registers that might have
race conditions on PG transitions.

Signed-off-by: Tom St Denis
Reviewed-by: Rex Zhu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 309e58cd1263..9b358df6e886 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2282,22 +2282,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
-	bool use_bank;
+	bool pm_pg_lock, use_bank;
 	unsigned instance_bank, sh_bank, se_bank;

 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

+	/* are we reading registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
 	if (*pos & (1ULL << 62)) {
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
 		use_bank = 1;
-		*pos &= 0xFFFFFF;
 	} else {
 		use_bank = 0;
 	}

+	*pos &= 0x3FFFF;
+
 	if (use_bank) {
 		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
 		    se_bank >= adev->gfx.config.max_shader_engines)
@@ -2307,6 +2311,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 				sh_bank, instance_bank);
 	}

+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;

@@ -2332,6 +2339,9 @@ end:
 		mutex_unlock(&adev->grbm_idx_mutex);
 	}

+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }

From 7acc83d4df77d2ed0153db2583f00a34e31fce92 Mon Sep 17 00:00:00 2001
From: Tom St Denis
Date: Thu, 28 Jul 2016 09:44:14 -0400
Subject: [PATCH 032/171] drm/amd/amdgpu: don't set clockgating in uvd_v6_0_start()

This is handled properly by both DPM and PP externally.
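A usage note for the pm-lock debugfs change in the previous patch: a
hypothetical userspace reader (the debugfs path and the register offset
below are assumptions; the offset encoding follows that hunk) sets
bit 23 of the file position to take the PM mutex around the access:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t val;
		/* low bits: register byte offset; bit 23: take pm.mutex */
		uint64_t pos = (0x2000ULL & 0x3FFFF) | (1ULL << 23);
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

		if (fd < 0 || pread(fd, &val, 4, (off_t)pos) != 4)
			return 1;
		printf("reg = 0x%08x\n", val);
		close(fd);
		return 0;
	}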
Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 4fa50918e886..4dbd5ab29bba 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -396,15 +396,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); - /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { - uvd_v6_0_set_sw_clock_gating(adev); - } else { - /* disable clock gating */ - uint32_t data = RREG32(mmUVD_CGC_CTRL); - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, data); - } + /* disable clock gating */ + tmp = RREG32(mmUVD_CGC_CTRL); + tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + WREG32(mmUVD_CGC_CTRL, tmp); /* disable interupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); From 2875125c3184c4b05f2c2c67540339e5b90813ad Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:44:49 -0400 Subject: [PATCH 033/171] drm/amd/amdgpu: don't track state in UVD clockgating There's no need to track CG state anymore. Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 4dbd5ab29bba..7f2b5de29f67 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -959,21 +959,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - static int curstate = -1; - - if (adev->asic_type == CHIP_FIJI || - adev->asic_type == CHIP_POLARIS10) - uvd_v6_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; + if (adev->asic_type == CHIP_FIJI || + adev->asic_type == CHIP_POLARIS10) + uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); - curstate = state; - if (enable) { + if (state == AMD_CG_STATE_GATE) { /* disable HW gating and enable Sw gating */ uvd_v6_0_set_sw_clock_gating(adev); } else { From fa5d2e0c103a7763b140746d396dcb8c723e74f2 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:45:07 -0400 Subject: [PATCH 034/171] drm/amd/amdgpu: enable PG_EN bit in powergating UVD Enable the PG_EN bit just before the SMU would be tasked with the PG transition. 
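With the hunk below applied, the gate path reads roughly as follows (a
sketch of uvd_v6_0_set_powergating_state() reconstructed from the
surrounding context lines):

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* arm power gating before the SMU performs the transition */
	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
	}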
Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7f2b5de29f67..391457f1eafd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -997,6 +997,8 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; + WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; From 0da31ffce99ea2867e78f2b88c35c55ca15f033d Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:46:00 -0400 Subject: [PATCH 035/171] drm/amd/amdgpu: Add error messages to UVD PG in DPM Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 41 ++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 2a11413ed54a..bfb6b019ff2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -2108,29 +2108,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) /* disable clockgating so we can properly shut down the block */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); + return; + } + /* shutdown the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - /* XXX: check for errors */ + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) + if (pi->caps_uvd_pg) { /* power off the UVD block */ - cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); + return; + } + } } else { if (pi->caps_uvd_pg) { /* power on the UVD block */ if (pi->uvd_dynamic_pg) - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); else - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n"); + return; + } + /* re-init the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n"); + return; + } + /* enable clockgating. 
hw will dynamically gate/ungate clocks on the fly */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); - /* XXX: check for errors */ + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); } From f6ade30418a753e3b52329fa067250091944733f Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:33:56 -0400 Subject: [PATCH 036/171] drm/amd/amdgpu: Enable carrizo GFX PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 03a31c53aec3..90b3ca89c40d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1249,7 +1249,13 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; + if (adev->rev_id != 0x00) { + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE; + } adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: From 65b426225b6f9d48a7250b0bd0b9141a9a6e7312 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:35:57 -0400 Subject: [PATCH 037/171] drm/amd/amdgpu: Enable carrizo UVD PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 90b3ca89c40d..008817174365 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1254,7 +1254,8 @@ static int vi_common_early_init(void *handle) if (adev->rev_id != 0x00) { adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_PIPELINE; + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD; } adev->external_rev_id = adev->rev_id + 0x1; break; From 2ed0936de5ccbfd670ebf558770bb89bf9fe3b96 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:36:26 -0400 Subject: [PATCH 038/171] drm/amd/amdgpu: Enable carrizo VCE PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 008817174365..66aa1eee6325 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1255,7 +1255,8 @@ static int vi_common_early_init(void *handle) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | - AMD_PG_SUPPORT_UVD; + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; } adev->external_rev_id = adev->rev_id + 0x1; break; From 4e86be75c69cac5ccb599ba5b38df60336f247c9 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:38:13 -0400 Subject: [PATCH 039/171] drm/amd/amdgpu: Enable stoney GFX PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 66aa1eee6325..c1e428495699 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1276,6 +1276,9 @@ static int 
vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE; adev->external_rev_id = adev->rev_id + 0x1; break; default: From c2cdb0428026dc79b0a902ca2c4371b88e130846 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:38:29 -0400 Subject: [PATCH 040/171] drm/amd/amdgpu: Enable stoney UVD PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index c1e428495699..9c3f7b7e1f90 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1278,7 +1278,8 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS; adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_PIPELINE; + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD; adev->external_rev_id = adev->rev_id + 0x1; break; default: From 75419c437851ab28d75bbf14502342c97cf47eb4 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 28 Jul 2016 09:38:45 -0400 Subject: [PATCH 041/171] drm/amd/amdgpu: Enable stoney VCE PG Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9c3f7b7e1f90..aaee251fe337 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1279,7 +1279,8 @@ static int vi_common_early_init(void *handle) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | - AMD_PG_SUPPORT_UVD; + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x1; break; default: From a94cbfc3773b90cf7ea02d4c079ec54eeabb42d6 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 7 Jul 2016 19:39:15 +0800 Subject: [PATCH 042/171] drm/amd/powerplay: fix typos of volt/millivolt symbols in comment. Signed-off-by: Rex Zhu Reviewed-by: Alexandre Demers Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 769636a0c5b5..a8085e7d0bac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -2905,8 +2905,8 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) continue; } - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. - * real voltage level in unit of 0.01mv */ + /* need to make sure vddc is less than 2V or else, it could burn the ASIC. 
+ * real voltage level in unit of 0.01mV */ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), "Invalid VDDC value", result = -EINVAL;); From af223dfaf0d93e7a0ed75bed4f69e5db198b741e Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 28 Jul 2016 16:51:47 +0800 Subject: [PATCH 043/171] drm/amdgpu: add module parameters to ctrl powerplay feature Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 1 - drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 1 + .../gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 3 --- .../drm/amd/powerplay/hwmgr/fiji_powertune.c | 5 ++--- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 22 ++++++++++++++++++- .../amd/powerplay/hwmgr/iceland_powertune.c | 5 ++--- .../drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 10 --------- .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 2 +- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 4 +++- 10 files changed, 34 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9aa533cf4ad1..44fda31c75d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -84,6 +84,7 @@ int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; int amdgpu_powercontainment = 1; +int amdgpu_sclk_deep_sleep_en = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; @@ -170,6 +171,9 @@ MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); #endif +MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)"); +module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444); + MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 57aa3425fd5a..a9b3adc81420 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,7 +52,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); - pp_init->powercontainment_enabled = amdgpu_powercontainment; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index abbb658bdc1e..12d3333b9d12 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,6 +31,7 @@ #include "eventmanager.h" #include "pp_debug.h" + #define PP_CHECK(handle) \ do { \ if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 120a9e2c3152..e93492b15c52 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -619,9 +619,6 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - 
data->gpio_debug = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index 44658451a8d2..c3b2f511e364 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -77,9 +77,8 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) fiji_hwmgr->fast_watermark_threshold = 100; - if (hwmgr->powercontainment_enabled) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { tmp = 1; fiji_hwmgr->enable_dte_feature = tmp ? false : true; fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 65408dd22a79..384b425407fb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -41,6 +41,25 @@ extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); +static int hwmgr_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + if (amdgpu_sclk_deep_sleep_en) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + if (amdgpu_powercontainment) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + + return 0; +} + int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { struct pp_hwmgr *hwmgr; @@ -60,7 +79,8 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->hw_revision = pp_init->rev_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; - hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; + + hwmgr_set_features_platform_caps(hwmgr); switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c index d10cd9f7c2df..041e9648e592 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c @@ -137,9 +137,8 @@ void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) data->fast_watermark_threshold = 100; - if (hwmgr->powercontainment_enabled) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { tmp = 1; data->enable_dte_feature = tmp ? false : true; data->enable_tdc_limit_feature = tmp ? 
true : false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index a8085e7d0bac..60cfcbde6f27 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -2772,9 +2772,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState); @@ -2820,13 +2817,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - if (hwmgr->powercontainment_enabled) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - else - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index b764c8c05ec8..ba29a5b5a2d2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,8 +132,8 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; - bool powercontainment_enabled; }; + enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, AMD_PP_DisplayConfigType_DP54 , diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index bf0d2accf7bf..911081caa269 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -41,6 +41,9 @@ struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; +extern int amdgpu_powercontainment; +extern int amdgpu_sclk_deep_sleep_en; + enum DISPLAY_GAP { DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. 
*/
@@ -614,7 +617,6 @@ struct pp_hwmgr {
 	uint32_t num_ps;
 	struct pp_thermal_controller_info thermal_controller;
 	bool fan_ctrl_is_in_default_mode;
-	bool powercontainment_enabled;
 	uint32_t fan_ctrl_default_mode;
 	uint32_t tmin;
 	struct phm_microcode_version_info microcode_version_info;

From 66bc3f7f03d5457f6c65790319b636bbab53e3cd Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Thu, 28 Jul 2016 17:36:35 +0800
Subject: [PATCH 044/171] drm/amdgpu: use module parameter to control deep sleep feature in dpm

Signed-off-by: Rex Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 5 ++++-
 drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 6 +++++-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 +++++-
 4 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e9364687cd95..389322af8a05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -91,6 +91,7 @@ extern unsigned amdgpu_pcie_lane_cap;
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;

 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
 #define AMDGPU_MAX_USEC_TIMEOUT	100000	/* 100 ms */
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index e2f0e5d58d5c..7bd6174da476 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5873,7 +5873,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 	pi->pcie_dpm_key_disabled = 0;
 	pi->thermal_sclk_dpm_enabled = 0;

-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;

 	pi->mclk_strobe_mode_threshold = 40000;
 	pi->mclk_stutter_mode_threshold = 40000;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index bfb6b019ff2a..794c5f36ca68 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -435,7 +435,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 		pi->caps_td_ramping = true;
 		pi->caps_tcp_ramping = true;
 	}
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->voting_clients = 0x00c00033;
 	pi->auto_thermal_throttling_enabled = true;
 	pi->bapm_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e883f5fa..f8618a3881a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 		pi->caps_tcp_ramping = true;
 	}

-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->enable_auto_thermal_throttling = true;
 	pi->disable_nb_ps3_in_battery = false;
 	if (amdgpu_bapm == 0)

From b62774fc2f731f430b8618c1d067b06cce71cbe5 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 29 Jul 2016 18:03:42 -0400
Subject: [PATCH 045/171] drm/amdgpu/gmc7: add missing mullins case

Looks like this got missed when we ported the code from radeon.
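Like Kaveri and Kabini, Mullins is an APU with no discrete MC firmware
to load, so gmc_v7_0_init_microcode() should just return 0 for it;
without this case it fell through to the default BUG(). The resulting
switch tail (sketch):

	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0; /* APUs: no MC ucode to fetch */
	default: BUG();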
Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index d24a82bd0c7a..0b0f08641eed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: return 0; default: BUG(); } From b9a8be95b1149b74a9455f6bbe633aa241ed5204 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 29 Jul 2016 18:14:39 -0400 Subject: [PATCH 046/171] drm/amdgpu/ci: add mullins to default case for smc ucode It's already covered by the default case, but add it for consistency. Reviewed-by: Alexandre Demers Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 7bd6174da476..a0d63a293bb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: default: BUG(); } From 1684d3ba488529266ce6f97b88076bd185f4790e Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Thu, 28 Jul 2016 17:25:01 -0400 Subject: [PATCH 047/171] drm/amd/amdgpu: change pptable output format from ASCII to binary Reviewed-by: Edward O'Callaghan Reviewed-by: Alex Deucher Signed-off-by: Eric Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 411965f12c01..d4ec3cb187a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; char *table = NULL; - int size, i; + int size; if (adev->pp_enabled) size = amdgpu_dpm_get_pp_table(adev, &table); @@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (size >= PAGE_SIZE) size = PAGE_SIZE - 1; - for (i = 0; i < size; i++) { - sprintf(buf + i, "%02x", table[i]); - } - sprintf(buf + i, "\n"); + memcpy(buf, table, size); return size; } From 309889098c6095cf4c1247724c22c2943807da20 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 1 Aug 2016 14:29:15 -0400 Subject: [PATCH 048/171] drm/amdgpu/gfx8: remove stale function declaration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This got leftover somehow when I cleaned this up. 
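A usage sketch for the pp_table change in the previous patch: with the
ASCII formatting gone, the sysfs attribute yields the raw pptable
bytes, which a tool can capture as-is (the sysfs path is an assumption
here; adjust the card index as needed):

	#include <stdio.h>

	int main(void)
	{
		unsigned char tbl[4096];
		size_t n;
		FILE *f = fopen("/sys/class/drm/card0/device/pp_table", "rb");

		if (!f)
			return 1;
		n = fread(tbl, 1, sizeof(tbl), f);
		fclose(f);
		fprintf(stderr, "read %zu bytes of pptable\n", n);
		return n == 0;
	}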
Reviewed-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index bc82c794312c..ebed1f829297 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,6 +26,4 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif From 59b4a97742888d9d3c4daa96bd15157ebd496f81 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Tue, 19 Jul 2016 16:48:22 +0800 Subject: [PATCH 049/171] drm/amdgpu: implement amdgpu_fill_buffer() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit so that bo could be set to some pattern Signed-off-by: Flora Cui Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 62 +++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 389322af8a05..9f71ff79b9c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -435,6 +435,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint32_t byte_count, struct reservation_object *resv, struct fence **fence); +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence); + int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); struct amdgpu_bo_list_entry { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b7742e62972a..5dc9c4c30777 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1207,6 +1207,68 @@ error_free: return r; } +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence) +{ + struct amdgpu_device *adev = bo->adev; + struct amdgpu_job *job; + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + + uint32_t max_bytes, byte_count; + uint64_t dst_offset; + unsigned int num_loops, num_dw; + unsigned int i; + int r; + + byte_count = bo->tbo.num_pages << PAGE_SHIFT; + max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + num_loops = DIV_ROUND_UP(byte_count, max_bytes); + num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* for IB padding */ + while (num_dw & 0x7) + num_dw++; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) + return r; + + if (resv) { + r = amdgpu_sync_resv(adev, &job->sync, resv, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + goto error_free; + } + } + + dst_offset = bo->tbo.mem.start << PAGE_SHIFT; + for (i = 0; i < num_loops; i++) { + uint32_t cur_size_in_bytes = min(byte_count, max_bytes); + + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, + dst_offset, cur_size_in_bytes); + + dst_offset += cur_size_in_bytes; + byte_count -= cur_size_in_bytes; + } + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); + if (r) + goto error_free; + + return 0; + +error_free: + amdgpu_job_free(job); + return r; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_mm_dump_table(struct 
seq_file *m, void *data) From 4fea83ff0f61676389b17803365c1e8d2b652183 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Wed, 20 Jul 2016 14:44:38 +0800 Subject: [PATCH 050/171] drm/amdgpu: expose AMDGPU_GEM_CREATE_VRAM_CLEARED to user space MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: fix the return value for fill failure and validate bo before filling data Reviewed-by: Christian König Signed-off-by: Flora Cui Reviewed-by: Chunming Zhou Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 33 ++++++++++++++++++++++ include/uapi/drm/amdgpu_drm.h | 2 ++ 2 files changed, 35 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 67de19c46ddb..d8e69a7e51f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -340,11 +340,44 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, if (unlikely(r != 0)) { return r; } + + if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && + bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { + struct fence *fence; + + if (adev->mman.buffer_funcs_ring == NULL || + !adev->mman.buffer_funcs_ring->ready) { + r = -EBUSY; + goto fail_free; + } + + r = amdgpu_bo_reserve(bo, false); + if (unlikely(r != 0)) + goto fail_free; + + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (unlikely(r != 0)) + goto fail_unreserve; + + amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + amdgpu_bo_fence(bo, fence, false); + amdgpu_bo_unreserve(bo); + fence_put(bo->tbo.moving); + bo->tbo.moving = fence_get(fence); + fence_put(fence); + } *bo_ptr = bo; trace_amdgpu_bo_create(bo); return 0; + +fail_unreserve: + amdgpu_bo_unreserve(bo); +fail_free: + amdgpu_bo_unref(&bo); + return r; } int amdgpu_bo_create(struct amdgpu_device *adev, diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 462246aa200e..a902a602490b 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -77,6 +77,8 @@ extern "C" { #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) /* Flag that USWC attributes should be used for GTT */ #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) +/* Flag that the memory should be in VRAM and cleared */ +#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) struct drm_amdgpu_gem_create_in { /** the requested memory size */ From 08bd8b9f6f11e5b4f13580805ef3a38848e7fed7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 29 Jul 2016 16:30:04 +0800 Subject: [PATCH 051/171] drm/amd/powerplay: delete useless code in hwmgr. 
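The unequal-wait helpers (phm_wait_for_register_unequal() and
phm_wait_for_indirect_register_unequal()), the stale read/write-indirect
declarations and the unused PHM_WAIT_*/array-register macro families
have no remaining callers. Only phm_wait_on_register(),
phm_wait_on_indirect_register() and the PHM_WAIT_INDIRECT_REGISTER /
PHM_WAIT_INDIRECT_FIELD macros built on them are kept; they poll a
(possibly indirect) register until a masked field matches, e.g.
(illustrative call, not part of this diff):

	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FEATURE_STATUS,
				VOLTAGE_CONTROLLER_ON, 1);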
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 36 ------- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 108 ++------------------ 2 files changed, 7 insertions(+), 137 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 384b425407fb..ba8f57a415c7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -206,29 +206,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, return 0; } -int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return -EINVAL; - } - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == hwmgr->usec_timeout) - return -1; - return 0; -} /** @@ -251,21 +229,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); } -void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return; - } - cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_for_register_unequal(hwmgr, indirect_port + 1, - value, mask); -} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 911081caa269..36b4ec9c9cb1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -639,16 +639,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); -extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index); - -extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value); extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, @@ -656,12 +647,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t value, uint32_t mask); -extern void phm_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask); + extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr); @@ -699,43 +685,7 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_FIELD_SHIFT(reg, field)) -#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_on_register(hwmgr, index, value, mask) -#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_for_register_unequal(hwmgr, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, 
mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask) - -/* Operations on named registers. */ - -#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) /* Operations on named fields. */ @@ -764,60 +714,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) -#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) + + +#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) #define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -/* Operations on arrays of registers & fields. 
*/ - -#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \ - cgs_read_register(device, mm##reg + (offset)) - -#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \ - cgs_write_register(device, mm##reg + (offset), value) - -#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \ - PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field) - -#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \ - PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \ - reg, field, fieldvalue)) - -#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) #endif /* _HWMGR_H_ */ From 93bdacd1bd7b81921c165e94bbe3dcefd6f88dc1 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 29 Jul 2016 19:15:52 +0800 Subject: [PATCH 052/171] drm/amd/powerplay: pass sub_device_id and sub_vendor_id to powerplay. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 3 +++ drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index a9b3adc81420..85288502ad05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,6 +52,9 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); + pp_init->rev_id = adev->pdev->revision; + pp_init->sub_sys_id = adev->pdev->subsystem_device; + pp_init->sub_vendor_id = adev->pdev->subsystem_vendor; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index ba29a5b5a2d2..3f8172f545b0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,6 +132,8 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; + uint16_t sub_sys_id; + uint16_t sub_vendor_id; }; enum amd_pp_display_config_type{ From 5765a36d4c88d7ea2ff53e35cccca751c74716d8 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 29 Jul 2016 19:19:26 +0800 Subject: [PATCH 053/171] drm/amdgpu: AMD SW workaround for certain partner boards Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ce0929bbf2e7..7e8cddb6d512 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -699,7 +699,10 @@ static 
void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); - if (adev->pdev->revision == 0xc7) { + if (adev->pdev->revision == 0xc7 && + ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || + (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) || + (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) { amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); } From 6391b6ab3246fe23840ed3635b17ad83f0fd0942 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 29 Jul 2016 19:26:36 +0800 Subject: [PATCH 054/171] drm/amd/powerplay: workaround Memory EDC Error for certain partner boards. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2 ++ drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index ba8f57a415c7..d829076ed9ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -77,6 +77,8 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->chip_family = pp_init->chip_family; hwmgr->chip_id = pp_init->chip_id; hwmgr->hw_revision = pp_init->rev_id; + hwmgr->sub_sys_id = pp_init->sub_sys_id; + hwmgr->sub_vendor_id = pp_init->sub_vendor_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 60cfcbde6f27..7d552966de3e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -3133,7 +3133,10 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) table_info->vddc_lookup_table; uint32_t i; - if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) { + if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7 && + ((hwmgr->sub_sys_id == 0xb37 && hwmgr->sub_vendor_id == 0x1002) || + (hwmgr->sub_sys_id == 0x4a8 && hwmgr->sub_vendor_id == 0x1043) || + (hwmgr->sub_sys_id == 0x9480 && hwmgr->sub_vendor_id == 0x1682))) { if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) return 0; From 8e9fc800ec9e1a50a1fa7c38af8bf80d29dd85b4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 31 Jul 2016 13:46:01 +0800 Subject: [PATCH 055/171] drm/amd/powerplay: delete code no longer in use on Polaris. 
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 7d552966de3e..98e01d17635a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -98,19 +98,6 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) - -static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = -{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ -static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] = -{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ -static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] = -{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; - /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ enum DPM_EVENT_SRC { DPM_EVENT_SRC_ANALOG = 0, From ecf6a637090c8034bc0d843c48905bfa5e2a4e0c Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 27 Jul 2016 19:12:15 +0800 Subject: [PATCH 056/171] drm/ttm: Add interface to export kernel_zone max memory size in ttm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ken Wang Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_memory.c | 6 ++++++ include/drm/ttm/ttm_memory.h | 1 + 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index a1803fbcc898..29855be96be0 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); + +uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) +{ + return glob->zone_kernel->max_mem; +} +EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size); diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 72dcbe81dd07..c4520890f267 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -155,4 +155,5 @@ extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page); extern size_t ttm_round_pot(size_t size); +extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); #endif From a693e050edfe794fea81d7cfe72429a406aa380b Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 27 Jul 2016 19:18:01 +0800 Subject: [PATCH 057/171] drm/amdgpu: use the kernel zone memory size as the max remote memory in amdgpu Signed-off-by: Ken Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++----- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7 ++++++- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9f71ff79b9c1..12112cc65dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2428,6 +2428,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev); +int amdgpu_ttm_global_init(struct amdgpu_device *adev); void amdgpu_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5dc9c4c30777..819c62d27bf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) ttm_mem_global_release(ref->object); } -static int amdgpu_ttm_global_init(struct amdgpu_device *adev) +int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; @@ -987,10 +988,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) unsigned i, j; int r; - r = amdgpu_ttm_global_init(adev); - if (r) { - return r; - } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, adev->mman.bo_global_ref.ref.object, @@ -1449,3 +1446,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) #endif } + +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev) +{ + return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object); +} diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0b0f08641eed..bca50e590fdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -393,7 +393,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. */ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -953,6 +953,11 @@ static int gmc_v7_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v7_0_mc_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 0a23b8394705..3cbbf53c600b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -469,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. 
*/ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -949,6 +949,11 @@ static int gmc_v8_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v8_0_mc_init(adev); if (r) return r; From f10379503e2b1814e27957899bcdfd0132a1915e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Sat, 30 Jul 2016 00:48:39 +0200 Subject: [PATCH 058/171] drm/amdgpu: print more accurate error messages on IB submission failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's useful for debugging. Signed-off-by: Marek Olšák Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0307ff5887c5..8eb93dff69d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -386,8 +386,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); goto error_free_pages; + } /* Without a BO list we don't have userptr BOs */ if (!p->bo_list) @@ -427,9 +429,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, /* Unreserve everything again. */ ttm_eu_backoff_reservation(&p->ticket, &p->validated); - /* We tried to often, just abort */ + /* We tried too many times, just abort */ if (!--tries) { r = -EDEADLK; + DRM_ERROR("deadlock in %s\n", __func__); goto error_free_pages; } @@ -441,11 +444,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, sizeof(struct page*)); if (!e->user_pages) { r = -ENOMEM; + DRM_ERROR("calloc failure in %s\n", __func__); goto error_free_pages; } r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages); if (r) { + DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n"); drm_free_large(e->user_pages); e->user_pages = NULL; goto error_free_pages; } @@ -462,12 +467,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bytes_moved = 0; r = amdgpu_cs_list_validate(p, &duplicates); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); goto error_validate; + } r = amdgpu_cs_list_validate(p, &p->validated); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n"); goto error_validate; + } fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); From 713c0021979a525b9e87dce8fd9d79dd0b99db4c Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 3 Aug 2016 09:25:59 -0400 Subject: [PATCH 059/171] drm/amdgpu: remove the check for sessions being closed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows the clocks and power to be gated when no blocks are decoding, for example while playback is paused. 
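To illustrate the effect, the idle handler now reduces to a pure fence-count check; a condensed sketch follows (not the verbatim driver code -- the real handler also falls back to amdgpu_asic_set_uvd_clocks() when DPM is disabled):

static void uvd_idle_work_sketch(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);

	/* Only fences still in flight keep UVD powered; open decode
	 * sessions alone no longer do, so a paused player idles off. */
	if (amdgpu_fence_count_emitted(&adev->uvd.ring) == 0)
		amdgpu_dpm_enable_uvd(adev, false);
	else	/* still busy, check again after the idle timeout */
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}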
Signed-off-by: Leo Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index c22b64ec4ec1..bf59354d788a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1089,15 +1089,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned i, fences, handles = 0; + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - ++handles; - - if (fences == 0 && handles == 0) { + if (fences == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); } else { From f16fe6d303855b609618d5db8705a8ee4634e4bf Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 10:14:44 -0400 Subject: [PATCH 060/171] drm/amd/amdgpu: fix indentation in vce3 CG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index e5b18ad42721..26fb606e2d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -126,6 +126,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, bool gated) { u32 tmp, data; + /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); @@ -165,9 +166,9 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8; if (tmp != data) WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { @@ -201,9 +202,9 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8); if (tmp != data) WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } From 6f906814a5a8045874910f9c21c3ffd758a22f65 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 10:15:24 -0400 Subject: [PATCH 061/171] drm/amd/amdgpu: Fix VCE CG order and resume defaults CG was being enabled in reverse sense from dpm/powerplay. Also fix the default CLK_EN signal to enable all of the blocks. 
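For reference, every register update in this patch uses the same read-modify-write idiom; a minimal sketch with one of the registers touched below (the masks are the ones the gated path of the diff applies):

	u32 tmp, data;

	tmp = data = RREG32(mmVCE_CLOCK_GATING_B);	/* read the current value once */
	data |= 0x1ff;					/* set the bits this mode needs */
	data &= ~0xef0000;				/* clear the conflicting override bits */
	if (tmp != data)				/* write back only on a real change */
		WREG32(mmVCE_CLOCK_GATING_B, data);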
Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 36 +++++++++------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 26fb606e2d3e..a9db5cbb622d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -130,40 +130,35 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); - if (!gated) { - /* Force CLOCK ON for VCE_CLOCK_GATING_B, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - * VREG can be FORCE ON or set to Dynamic, but can't be OFF - */ + /* This function enables MGCG which is controlled by firmware. + With the clocks in the gated state the core is still + accessible but the firmware will throttle the clocks on the + fly as necessary. + */ + if (gated) { tmp = data = RREG32(mmVCE_CLOCK_GATING_B); data |= 0x1ff; data &= ~0xef0000; if (tmp != data) WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK ON for VCE_UENC_CLOCK_GATING, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - */ tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0x3ff000; data &= ~0xffc00000; if (tmp != data) WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* set VCE_UENC_CLOCK_GATING_2 */ tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x2; - data &= ~0x2; + data &= ~0x00010000; if (tmp != data) WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */ tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data |= 0x37f; if (tmp != data) WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | @@ -172,34 +167,27 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, if (tmp != data) WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { - /* Force CLOCK OFF for VCE_CLOCK_GATING_B, - * {*, *_FORCE_OFF} = {*, 1} - * set VREG to Dynamic, as it can't be OFF - */ tmp = data = RREG32(mmVCE_CLOCK_GATING_B); data &= ~0x80010; data |= 0xe70008; if (tmp != data) WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING, - * Force ClOCK OFF takes precedent over Force CLOCK ON setting. 
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1} - */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0xffc00000; if (tmp != data) WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* Set VCE_UENC_CLOCK_GATING_2 */ + tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x10000; if (tmp != data) WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */ + tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data &= ~0xffc00000; if (tmp != data) WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ + tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | @@ -538,7 +526,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); - WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + WREG32(mmVCE_CLOCK_GATING_B, 0x1FF); WREG32(mmVCE_LMI_CTRL, 0x00398000); WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); From 13d58d2a492269880e85d89e124edb814753fa37 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 10:15:48 -0400 Subject: [PATCH 062/171] drm/amd/amdgpu: Don't set VCE CG in startup It's handled by DPM/PP properly. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index a9db5cbb622d..311b384a02d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -285,10 +285,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev) /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); - /* Set Clock-Gating off */ - if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) - vce_v3_0_set_vce_sw_clock_gating(adev, false); - if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); mutex_unlock(&adev->grbm_idx_mutex); From 1af69a2c355e9a6d3d9384697be923e34265f79c Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 10:16:17 -0400 Subject: [PATCH 063/171] drm/amd/amdgpu: Enable VCE MGCG for Carrizo Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index aaee251fe337..de959a755e22 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1248,7 +1248,8 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; if (adev->rev_id != 0x00) { From 8ef583e9f9df9b6371e3faecb3955350f0588a4c Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 11:34:35 -0400 Subject: [PATCH 064/171] drm/amd/amdgpu: Enable VCE MGCG for Stoney Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index de959a755e22..8f37066adcee 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1276,7 +1276,8 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | From 568ba157cdb561c8e52985d81bdabed3f6248f77 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 3 Aug 2016 12:37:23 -0400 Subject: [PATCH 065/171] drm/amd/amdgpu: Partially revert change to UVD v3 CG Partially undo changes made by commit: drm/amd/amdgpu: don't track state in UVD clockgating To keep bypass even if CG flags are not set. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 391457f1eafd..c11b97f8e376 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -960,13 +960,13 @@ static int uvd_v6_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - if (adev->asic_type == CHIP_FIJI || adev->asic_type == CHIP_POLARIS10) uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) + return 0; + if (state == AMD_CG_STATE_GATE) { /* disable HW gating and enable Sw gating */ uvd_v6_0_set_sw_clock_gating(adev); From e72cfd58998834570b0ea9cd1961e4abe84945e1 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 27 Jul 2016 13:15:20 +0800 Subject: [PATCH 066/171] drm/amdgpu: irq resume should be immediately after gpu resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9b358df6e886..c715f1bd4598 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2094,6 +2094,7 @@ retry: amdgpu_atombios_scratch_regs_restore(adev); } if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); if (r) { dev_err(adev->dev, "ib ring test failed (%d).\n", r); @@ -2124,7 +2125,6 @@ retry: /* bad news, how to tell it to userspace ? 
*/ dev_info(adev->dev, "GPU reset failed\n"); } - amdgpu_irq_gpu_reset_resume_helper(adev); return r; } From 9c0171b41248805d928863fc87190e6530b71773 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 3 Aug 2016 14:55:46 +0800 Subject: [PATCH 067/171] drm/amdgpu: fix harvest config checking in vce3 check_soft_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 311b384a02d3..516882b07842 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -631,9 +631,6 @@ static int vce_v3_0_check_soft_reset(void *handle) tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0); WREG32(mmGRBM_GFX_INDEX, tmp); - if (adev->vce.harvest_config & (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) - srbm_soft_reset = 0; - if (srbm_soft_reset) { adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; adev->vce.srbm_soft_reset = srbm_soft_reset; From ac8e3f304047e8f0229e217afaee25be918f8ae7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 3 Aug 2016 15:10:28 +0800 Subject: [PATCH 068/171] drm/amdgpu: remove RB1_BUSY bit checking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a workaround to let VCE soft reset work. RB1_BUSY bit is always set, so remove its checking now, and we will depend on RB0_BUSY currently. After we find the root cause of RB1_BUSY, we can add it back. Signed-off-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 516882b07842..76d5d6893c89 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -589,7 +589,11 @@ static int vce_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -#define AMDGPU_VCE_STATUS_BUSY_MASK 0x78 +#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ +#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ + VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) static int vce_v3_0_check_soft_reset(void *handle) { From bde1d8b228cfaca212bcf5f093b5c8c928af2bff Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 3 Aug 2016 15:15:37 +0800 Subject: [PATCH 069/171] drm/amdgpu: enable VCE soft reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 --- 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c715f1bd4598..72d5d094965d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1977,7 +1977,6 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) { if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - 
adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { DRM_INFO("Some block need full reset!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 76d5d6893c89..7e6bb45658f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -601,9 +601,6 @@ static int vce_v3_0_check_soft_reset(void *handle) u32 srbm_soft_reset = 0; u32 tmp; - /* VCE BUG: it is always busy, so skip its checking now */ - return 0; - /* According to VCE team , we should use VCE_STATUS instead * SRBM_STATUS.VCE_BUSY bit for busy status checking. * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE From 29425288ba76f2476b2a9375e1d171f631aa01dd Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 3 Aug 2016 14:37:19 +0800 Subject: [PATCH 070/171] drm/amd/powerplay: delete code set/unset deep_sleep/power_containment. They were controlled by a module parameter. Signed-off-by: Rex Zhu Reviewed-by: Ken Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 3 --- drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c | 2 -- drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 6 +----- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 8cc0df9b534a..40153847675a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -221,9 +221,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NonABMSupportInPPLib); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicM3Arbiter); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index c3b2f511e364..f5992ea0c56f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -56,8 +56,6 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0]; /* Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index d8ca59b283d3..f48f3eac5ba4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -3621,9 +3621,6 @@ static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->engine_clock_data = 0; data->memory_clock_data = 0; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleepAboveLow); @@ -3847,8 +3844,7 @@ static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_GFXClockGatingManagedInCAIL); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_IcelandULPSSWWorkAround); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - 
PHM_PlatformCaps_PowerContainment); + /* iceland doesn't support UVD and VCE */ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, From 173247569a95a63daa1a28b4c7e2c7d02a89d1a3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 1 Aug 2016 10:49:00 +0800 Subject: [PATCH 071/171] drm/amd/powerplay: change structure variable name. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5dba7c509710..704ff4cc0023 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -978,7 +978,7 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) return 0; } -static const struct pp_smumgr_func ellsemere_smu_funcs = { +static const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, .smu_fini = polaris10_smu_fini, .start_smu = polaris10_start_smu, @@ -1001,7 +1001,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr) return -1; smumgr->backend = polaris10_smu; - smumgr->smumgr_funcs = &ellsemere_smu_funcs; + smumgr->smumgr_funcs = &polaris10_smu_funcs; return 0; } From fad2af195f1abaada473f4f9e9a554c1e4db768b Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 18 Apr 2016 23:29:32 +0800 Subject: [PATCH 072/171] drm/amd/powerplay: enable powerplay as default on CZ/ST Enable powerplay as default on Carrizo and Stoney. And it can be disabled with amdgpu.powerplay=0. Reviewed-by: Alex Deucher Signed-off-by: Huang Rui Cc: Rex Zhu Cc: Flora Cui Cc: Ken Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 85288502ad05..545074479e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -109,11 +109,9 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_TONGA: case CHIP_FIJI: case CHIP_TOPAZ: - adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; - break; case CHIP_CARRIZO: case CHIP_STONEY: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; break; /* These chips don't have powerplay implemenations */ case CHIP_BONAIRE: From fdba11f4079ec0b3037a0e3ce6014b47135db431 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 31 Mar 2016 18:18:28 -0400 Subject: [PATCH 073/171] drm/amdgpu: move all Kconfig options to amdgpu/Kconfig For consistency. Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 3 --- drivers/gpu/drm/amd/amdgpu/Kconfig | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index fc357319de35..64f2a448a137 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -171,9 +171,6 @@ config DRM_AMDGPU If M is selected, the module will be called amdgpu. 
source "drivers/gpu/drm/amd/amdgpu/Kconfig" -source "drivers/gpu/drm/amd/powerplay/Kconfig" - -source "drivers/gpu/drm/amd/acp/Kconfig" source "drivers/gpu/drm/nouveau/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 7335c0420c70..f3cb69de0c44 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -25,3 +25,5 @@ config DRM_AMDGPU_GART_DEBUGFS Selecting this option creates a debugfs file to inspect the mapped pages. Uses more memory for housekeeping, enable only for debugging. +source "drivers/gpu/drm/amd/powerplay/Kconfig" +source "drivers/gpu/drm/amd/acp/Kconfig" From 7b8082bc0774b75a8a3f457f2399a85d4d515c56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 5 Aug 2016 18:36:10 +0900 Subject: [PATCH 074/171] drm/ttm: Wait for a BO to become idle before unbinding it from GTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes hangs under memory pressure, e.g. running the piglit test tex3d-maxsize concurrently with other tests. Fixes: 17d33bc9d6ef ("drm/ttm: drop waiting for idle in ttm_bo_evict.") Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 ++-- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- drivers/gpu/drm/ttm/ttm_bo_util.c | 10 +++++++++- include/drm/ttm/ttm_bo_driver.h | 3 ++- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 819c62d27bf8..105bd229fdad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -336,7 +336,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -369,7 +369,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 528bdeffb339..6190035edfea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index ffdad81ef964..0c00e192c845 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, 
true, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4054d804fe06..42c074a9c955 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -354,7 +354,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu, + mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 2df602a35f92..f157a9efd220 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, + bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; @@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + pr_err("Failed to expire sync object before unbinding TTM\n"); + return ret; + } + ttm_tt_unbind(ttm); ttm_bo_free_old_node(bo); ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 4348d6d5877a..99c6d01d24f2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -962,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * * @bo: A pointer to a struct ttm_buffer_object. * @evict: 1: This is an eviction. Don't try to pipeline. + * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. * @new_mem: struct ttm_mem_reg indicating where to move. 
* @@ -976,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, */ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem); /** From 4e2f0caa3960c1890ca4a7650d5e6bebbcc8ca04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 8 Aug 2016 12:28:25 +0900 Subject: [PATCH 075/171] drm/ttm: Remove unused parameter evict from ttm_bo_move_ttm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 ++-- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo.c | 3 +-- drivers/gpu/drm/ttm/ttm_bo_util.c | 4 ++-- include/drm/ttm/ttm_bo_driver.h | 3 +-- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 105bd229fdad..42c9066ccacb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -336,7 +336,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -369,7 +369,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6190035edfea..01460d765a88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0c00e192c845..f8f474d2f2f7 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 42c074a9c955..2a750d443874 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -354,8 
+354,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu, - mem); + ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index f157a9efd220..f87162fb1202 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,8 +45,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool interruptible, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 99c6d01d24f2..12d348fb58da 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -961,7 +961,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * ttm_bo_move_ttm * * @bo: A pointer to a struct ttm_buffer_object. - * @evict: 1: This is an eviction. Don't try to pipeline. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. * @new_mem: struct ttm_mem_reg indicating where to move. @@ -977,7 +976,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, */ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool interruptible, bool no_wait_gpu, + bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem); /** From 4499f2acd57b568e8fa2c7908a8a3cf2802629c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 8 Aug 2016 12:28:26 +0900 Subject: [PATCH 076/171] drm/ttm: Remove unused parameter evict from ttm_bo_move_memcpy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +-- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/qxl/qxl_ttm.c | 4 ++-- drivers/gpu/drm/radeon/radeon_ttm.c | 3 +-- drivers/gpu/drm/ttm/ttm_bo.c | 3 +-- drivers/gpu/drm/ttm/ttm_bo_util.c | 3 +-- include/drm/ttm/ttm_bo_driver.h | 4 +--- 7 files changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 42c9066ccacb..3cf54c1b21d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -436,8 +436,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 01460d765a88..8ab9ce5089fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1297,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. 
*/ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); out: if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index d50c9679e631..6a22de045cb5 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -361,8 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, qxl_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, + new_mem); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index f8f474d2f2f7..93414aca60d6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -444,8 +444,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 2a750d443874..c2a30bdc8a01 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -359,8 +359,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index f87162fb1202..bf6e21655c57 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -329,8 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 12d348fb58da..c986fa7effd2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -983,7 +983,6 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * ttm_bo_move_memcpy * * @bo: A pointer to a struct ttm_buffer_object. - * @evict: 1: This is an eviction. Don't try to pipeline. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. * @new_mem: struct ttm_mem_reg indicating where to move. 
@@ -999,8 +998,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, */ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem); /** From 1969753907286e5bf5b37c477113f77fbb6159e2 Mon Sep 17 00:00:00 2001 From: Lyude Date: Fri, 5 Aug 2016 20:30:34 -0400 Subject: [PATCH 077/171] drm/radeon: Don't print error on aux transaction timeouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since it's normal for DRM to retry our aux transaction helpers multiple times in a row, up to 32 times for each attempted transaction, we're making a lot of noise that is no longer necessary now that DRM will just print the return code we give it. Reviewed-by: Christian König Signed-off-by: Lyude Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_dp_auxch.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index db64e0062689..2d465648856a 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -164,7 +164,6 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg } if (tmp & AUX_SW_RX_TIMEOUT) { - DRM_DEBUG_KMS("dp_aux_ch timed out\n"); ret = -ETIMEDOUT; goto done; } From 84cefe18e3951d985b1179b35d14fae9e4ae2229 Mon Sep 17 00:00:00 2001 From: Lyude Date: Fri, 5 Aug 2016 20:30:35 -0400 Subject: [PATCH 078/171] drm/radeon: Don't retry 7 times in radeon_dp_dpcd() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When this code was written, we didn't retry DP aux transactions on any error, which required retrying important transactions like this in individual drivers. Since that's no longer the case, retrying here is not necessary. As well, we retry any aux transaction on any error 32 times. 7 * 32 = 224, which means this loop causes us to retry grabbing the dpcd 224 times. This is definitely far more than we actually need to do. 
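Spelling the arithmetic out as a sketch (illustration only; the 32 is the retry count the DRM core's aux helpers already apply, as described above):

	/* drm_dp_dpcd_read() is itself retried up to 32 times on error by
	 * the DRM core, so a driver-side loop of 7 multiplies the attempts: */
	int worst_case_transactions = 7 * 32;	/* = 224 aux reads for one DPCD fetch */

With the loop removed, a failing DPCD read costs at most the core's own 32 attempts.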
Reviewed-by: Christian König Signed-off-by: Lyude Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_dp.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index cead089a9e7d..432cb46f6a34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -389,22 +389,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, + DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - radeon_dp_probe_oui(radeon_connector); + radeon_dp_probe_oui(radeon_connector); - return true; - } + return true; } + dig_connector->dpcd[0] = 0; return false; } From aff802954e06c435f582e8725964d66e21c8d573 Mon Sep 17 00:00:00 2001 From: Lyude Date: Fri, 5 Aug 2016 20:30:36 -0400 Subject: [PATCH 079/171] drm/amdgpu: Don't print error on aux transaction timeouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since it's normal for DRM to retry our aux transaction helpers multiple times in a row, up to 32 times for each attempted transaction, we're making a lot of noise that is no longer necessary now that DRM will just print the return code we give it. Reviewed-by: Christian König Signed-off-by: Lyude Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 7f85c2c1d681..166dc7bdcfee 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, /* timeout */ if (args.v2.ucReplyStatus == 1) { - DRM_DEBUG_KMS("dp_aux_ch timeout\n"); r = -ETIMEDOUT; goto done; } From 14a2fb487bed51d6020ed5d950e9f2cf17b5421b Mon Sep 17 00:00:00 2001 From: Lyude Date: Fri, 5 Aug 2016 20:30:37 -0400 Subject: [PATCH 080/171] drm/amdgpu: Don't retry 7 times in amdgpu_atombios_dp_get_dpcd() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When this code was written, we didn't retry DP aux transactions on any error, which required retrying important transactions like this in individual drivers. Since that's no longer the case, retrying here is not necessary. As well, we retry any aux transaction on any error 32 times. 7 * 32 = 224, which means this loop causes us to retry grabbing the dpcd 224 times. This is definitely far more than we actually need to do. 
Reviewed-by: Christian König Signed-off-by: Lyude Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 166dc7bdcfee..f81068ba4cc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -338,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) { struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, + msg, DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - amdgpu_atombios_dp_probe_oui(amdgpu_connector); + amdgpu_atombios_dp_probe_oui(amdgpu_connector); - return 0; - } + return 0; } + dig_connector->dpcd[0] = 0; return -EINVAL; } From 16925c92dbd97524655525b6816625e1f0063d12 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:30:34 +0800 Subject: [PATCH 081/171] drm/amdgpu: Add virtual connector and encoder macros. For the virtual display feature, add virtual connector and encoder macros. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ObjectID.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 06192698bd96..b8d66670bb17 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -90,6 +90,7 @@ #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27 +#define ENCODER_OBJECT_ID_VIRTUAL 0x28 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF @@ -119,6 +120,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17 /* deleted */ @@ -147,6 +149,7 @@ #define GRAPH_OBJECT_ENUM_ID5 0x05 #define GRAPH_OBJECT_ENUM_ID6 0x06 #define GRAPH_OBJECT_ENUM_ID7 0x07 +#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08 /****************************************************/ /* Graphics Object ID Bit definition */ /****************************************************/ @@ -408,6 +411,10 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT) +#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT) + /****************************************************/ /* Connector Object ID definition - Shared with BIOS */ /****************************************************/ From c6e14f40ba1c2d23be03f09ace4b4d78bfc066e4 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:30:50 +0800 Subject: [PATCH 082/171] drm/amdgpu: Initialize dce_virtual_ip_funcs For the virtual display feature, first initialize dce_virtual_ip_funcs, which will be used when setting the IP blocks.
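As a hint of where this table ends up being consumed (the actual wiring arrives later in this series), a DCE entry in an ASIC's IP-block list simply points its .funcs at the virtual implementation; the version numbers here are illustrative only:

static const struct amdgpu_ip_block_version dce_block_virtual = {
	.type  = AMD_IP_BLOCK_TYPE_DCE, /* occupies the slot of the real DCE block */
	.major = 11,                    /* illustrative version numbers */
	.minor = 0,
	.rev   = 0,
	.funcs = &dce_virtual_ip_funcs, /* the table this patch introduces */
};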
Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 +- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 308 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/dce_virtual.h | 29 ++ 4 files changed, 343 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_virtual.c create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_virtual.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c7fcdcedaadb..21dd7c00da15 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -58,7 +58,8 @@ amdgpu-y += \ # add DCE block amdgpu-y += \ dce_v10_0.o \ - dce_v11_0.o + dce_v11_0.o \ + dce_virtual.o # add GFX block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 76f96028313d..abeb6aaf89a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -335,7 +335,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set) return ret; } -static const char *encoder_names[38] = { +static const char *encoder_names[41] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", @@ -374,6 +374,9 @@ static const char *encoder_names[38] = { "TRAVIS", "INTERNAL_VCE", "INTERNAL_UNIPHY3", + "HDMI_ANX9805", + "INTERNAL_AMCLK", + "VIRTUAL", }; static const char *hpd_names[6] = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c new file mode 100644 index 000000000000..953ee1a501a5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -0,0 +1,308 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "atombios_crtc.h" +#include "atombios_encoders.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); + +static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { + .cursor_set2 = NULL, + .cursor_move = NULL, + .gamma_set = NULL, + .set_config = NULL, + .destroy = NULL, + .page_flip = NULL, +}; + +static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { + .dpms = NULL, + .mode_fixup = NULL, + .mode_set = NULL, + .mode_set_base = NULL, + .mode_set_base_atomic = NULL, + .prepare = NULL, + .commit = NULL, + .load_lut = NULL, + .disable = NULL, +}; + +static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + adev->mode_info.crtcs[index] = amdgpu_crtc; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); + + return 0; +} + +static int dce_virtual_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + dce_virtual_set_display_funcs(adev); + dce_virtual_set_irq_funcs(adev); + + adev->mode_info.num_crtc = 1; + adev->mode_info.num_hpd = 1; + adev->mode_info.num_dig = 1; + return 0; +} + +static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) +{ + struct amdgpu_i2c_bus_rec ddc_bus; + struct amdgpu_router router; + struct amdgpu_hpd hpd; + + /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = AMDGPU_HPD_NONE; + /* needed for aux chan transactions */ + ddc_bus.hpd = hpd.hpd; + + memset(&router, 0, sizeof(router)); + router.ddc_valid = false; + router.cd_valid = false; + amdgpu_display_add_connector(adev, + 0, + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, + CONNECTOR_OBJECT_ID_VIRTUAL, + &hpd, + &router); + + amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, + ATOM_DEVICE_CRT1_SUPPORT, + 0); + + amdgpu_link_encoder_connector(adev->ddev); + + return true; +} + +static int dce_virtual_sw_init(void *handle) +{ + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq); + if (r) + return r; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_virtual_crtc_init(adev, i); + 
if (r) + return r; + } + + dce_virtual_get_connector_info(adev); + amdgpu_print_display_setup(adev->ddev); + + drm_kms_helper_poll_init(adev->ddev); + + adev->mode_info.mode_config_initialized = true; + return 0; +} + +static int dce_virtual_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + return 0; +} + +static int dce_virtual_hw_init(void *handle) +{ + return 0; +} + +static int dce_virtual_hw_fini(void *handle) +{ + return 0; +} + +static int dce_virtual_suspend(void *handle) +{ + return dce_virtual_hw_fini(handle); +} + +static int dce_virtual_resume(void *handle) +{ + int ret; + + ret = dce_virtual_hw_init(handle); + + return ret; +} + +static bool dce_virtual_is_idle(void *handle) +{ + return true; +} + +static int dce_virtual_wait_for_idle(void *handle) +{ + return 0; +} + +static int dce_virtual_soft_reset(void *handle) +{ + return 0; +} + +static int dce_virtual_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dce_virtual_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs dce_virtual_ip_funcs = { + .name = "dce_virtual", + .early_init = dce_virtual_early_init, + .late_init = NULL, + .sw_init = dce_virtual_sw_init, + .sw_fini = dce_virtual_sw_fini, + .hw_init = dce_virtual_hw_init, + .hw_fini = dce_virtual_hw_fini, + .suspend = dce_virtual_suspend, + .resume = dce_virtual_resume, + .is_idle = dce_virtual_is_idle, + .wait_for_idle = dce_virtual_wait_for_idle, + .soft_reset = dce_virtual_soft_reset, + .set_clockgating_state = dce_virtual_set_clockgating_state, + .set_powergating_state = dce_virtual_set_powergating_state, +}; + +static const struct amdgpu_display_funcs dce_virtual_display_funcs = { + .set_vga_render_state = NULL, + .bandwidth_update = NULL, + .vblank_get_counter = NULL, + .vblank_wait = NULL, + .is_display_hung = NULL, + .backlight_set_level = NULL, + .backlight_get_level = NULL, + .hpd_sense = NULL, + .hpd_set_polarity = NULL, + .hpd_get_gpio_reg = NULL, + .page_flip = NULL, + .page_flip_get_scanoutpos = NULL, + .add_encoder = NULL, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = NULL, + .resume_mc_access = NULL, +}; + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_virtual_display_funcs; +} + +static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { + .set = NULL, + .process = NULL, +}; + +static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { + .set = NULL, + .process = NULL, +}; + +static const struct amdgpu_irq_src_funcs dce_virtual_hpd_irq_funcs = { + .set = NULL, + .process = NULL, +}; + +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dce_virtual_hpd_irq_funcs; +} + + diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h new file mode 100644 index 000000000000..d205d7f6f6ab --- /dev/null +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DCE_VIRTUAL_H__ +#define __DCE_VIRTUAL_H__ + +extern const struct amd_ip_funcs dce_virtual_ip_funcs; +#endif + From 8e6de75bd1d6fa84c4338f6e2276eb2bc339f5c9 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:31:13 +0800 Subject: [PATCH 083/171] drm/amdgpu: Initialize dce_virtual_display_funcs. For virtual display feature, initialize dce_virtual_display_funcs, which will be used in function dce_virtual_set_display_funcs. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 222 +++++++++++++++++++++-- 1 file changed, 208 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 953ee1a501a5..2da8847f8938 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -34,6 +34,97 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); +/** + * dce_virtual_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). 
+ */ +static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + return; +} + +static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return adev->ddev->vblank[crtc].count; +} + +static void dce_virtual_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + return; +} + +static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + + *vbl = 0; + *position = 0; + + return 0; +} + +static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return true; +} + +static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return; +} + +static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return 0; +} + +static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) +{ + return false; +} + +void dce_virtual_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + return; +} +void dce_virtual_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + return; +} + +void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + return; +} + +/** + * dce_virtual_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). + */ +static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) +{ + return; +} + static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { .cursor_set2 = NULL, .cursor_move = NULL, @@ -253,23 +344,127 @@ const struct amd_ip_funcs dce_virtual_ip_funcs = { .set_powergating_state = dce_virtual_set_powergating_state, }; +/* these are handled by the primary encoders */ +static void dce_virtual_encoder_prepare(struct drm_encoder *encoder) +{ + return; +} + +static void dce_virtual_encoder_commit(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return; +} + +static void dce_virtual_encoder_disable(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + return; +} + +static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + /* set the active encoder to connector routing */ + amdgpu_encoder_set_active_device(encoder); + + return true; +} + +static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = { + .dpms = dce_virtual_encoder_dpms, + .mode_fixup = dce_virtual_encoder_mode_fixup, + .prepare = dce_virtual_encoder_prepare, + .mode_set = dce_virtual_encoder_mode_set, + .commit = dce_virtual_encoder_commit, + .disable = dce_virtual_encoder_disable, +}; + +static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { + .destroy = dce_virtual_encoder_destroy, +}; + +static void dce_virtual_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, 
+ uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + encoder->possible_crtcs = 0x1; + amdgpu_encoder->enc_priv = NULL; + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); + DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); +} + static const struct amdgpu_display_funcs dce_virtual_display_funcs = { - .set_vga_render_state = NULL, - .bandwidth_update = NULL, - .vblank_get_counter = NULL, - .vblank_wait = NULL, - .is_display_hung = NULL, + .set_vga_render_state = &dce_virtual_set_vga_render_state, + .bandwidth_update = &dce_virtual_bandwidth_update, + .vblank_get_counter = &dce_virtual_vblank_get_counter, + .vblank_wait = &dce_virtual_vblank_wait, + .is_display_hung = &dce_virtual_is_display_hung, .backlight_set_level = NULL, .backlight_get_level = NULL, - .hpd_sense = NULL, - .hpd_set_polarity = NULL, - .hpd_get_gpio_reg = NULL, - .page_flip = NULL, - .page_flip_get_scanoutpos = NULL, - .add_encoder = NULL, + .hpd_sense = &dce_virtual_hpd_sense, + .hpd_set_polarity = &dce_virtual_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, + .page_flip = &dce_virtual_page_flip, + .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, + .add_encoder = &dce_virtual_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = NULL, - .resume_mc_access = NULL, + .stop_mc_access = &dce_virtual_stop_mc_access, + .resume_mc_access = &dce_virtual_resume_mc_access, }; static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) @@ -305,4 +500,3 @@ static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) adev->hpd_irq.funcs = &dce_virtual_hpd_irq_funcs; } - From e13273d4a4702f7cb21a5f6e94919a5b52c45c32 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:31:37 +0800 Subject: [PATCH 084/171] drm/amdgpu: Initialize crtc, pageflip irq funcs (v2) For the virtual display feature, initialize dce_virtual_crtc_irq_funcs and dce_virtual_pageflip_irq_funcs. As there is no DCE engine, the pageflip interrupt won't be generated; the vsync interrupt will instead be generated by the SMU's periodic timer or by a software timer, implemented later in this series.
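A minimal sketch of the software-vsync idea mentioned above, assuming a ~60 Hz frame period (fake_vblank_handler() is an illustrative name; the real implementation in a later patch of this series re-arms the timer manually with hrtimer_start() and returns HRTIMER_NORESTART instead):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define FAKE_VBLANK_PERIOD_NS 16666666 /* ~60 Hz */

static enum hrtimer_restart fake_vblank_handler(struct hrtimer *t)
{
	/* a real handler would bump the vblank counter and call
	 * drm_handle_vblank() here before re-arming the timer */
	hrtimer_forward_now(t, ns_to_ktime(FAKE_VBLANK_PERIOD_NS));
	return HRTIMER_RESTART;
}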
v2: agd: rebase on upstream Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 130 ++++++++++++++++++++--- 1 file changed, 118 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 2da8847f8938..985b276e6b25 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -473,19 +473,128 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) adev->mode_info.funcs = &dce_virtual_display_funcs; } +static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } +} + +static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + default: + break; + } + return 0; +} + +static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, + int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } +} + +static int dce_virtual_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = 0; + unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; + + adev->ddev->vblank[crtc].count++; + dce_virtual_crtc_vblank_int_ack(adev, crtc); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + return 0; +} + +static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + if (type >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); + + return 0; +} + +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned long flags; + unsigned crtc_id = 0; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = 0; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + if (crtc_id >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. 
clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup usersapce */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); + + return 0; +} + static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { - .set = NULL, - .process = NULL, + .set = dce_virtual_set_crtc_irq_state, + .process = dce_virtual_crtc_irq, }; static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { - .set = NULL, - .process = NULL, -}; - -static const struct amdgpu_irq_src_funcs dce_virtual_hpd_irq_funcs = { - .set = NULL, - .process = NULL, + .set = dce_virtual_set_pageflip_irq_state, + .process = dce_virtual_pageflip_irq, }; static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) @@ -495,8 +604,5 @@ static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; - - adev->hpd_irq.num_types = AMDGPU_HPD_LAST; - adev->hpd_irq.funcs = &dce_virtual_hpd_irq_funcs; } From f1f5ef92679fd9871b9642916c32b78fc333642a Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:32:00 +0800 Subject: [PATCH 085/171] drm/amdgpu: Initialize dce_virtual_crtc_helper_funcs For virtual display feature, initialize dce_virtual_crtc_helper_funcs. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 137 +++++++++++++++++++++-- 1 file changed, 128 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 985b276e6b25..11f04ec51c9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -134,16 +134,135 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { .page_flip = NULL, }; +static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + /* Make sure VBLANK and PFLIP interrupts are still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); + drm_vblank_on(dev, amdgpu_crtc->crtc_id); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_off(dev, amdgpu_crtc->crtc_id); + amdgpu_crtc->enabled = false; + break; + } +} + + +static void dce_virtual_crtc_prepare(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_virtual_crtc_commit(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void dce_virtual_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else 
{ + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + /* update the hw version fpr dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + + return true; +} + + +static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc) +{ + return; +} + +static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return 0; +} + static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { - .dpms = NULL, - .mode_fixup = NULL, - .mode_set = NULL, - .mode_set_base = NULL, - .mode_set_base_atomic = NULL, - .prepare = NULL, - .commit = NULL, - .load_lut = NULL, - .disable = NULL, + .dpms = dce_virtual_crtc_dpms, + .mode_fixup = dce_virtual_crtc_mode_fixup, + .mode_set = dce_virtual_crtc_mode_set, + .mode_set_base = dce_virtual_crtc_set_base, + .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, + .prepare = dce_virtual_crtc_prepare, + .commit = dce_virtual_crtc_commit, + .load_lut = dce_virtual_crtc_load_lut, + .disable = dce_virtual_crtc_disable, }; static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) From 0d43f3bca1ea1f85c1b344abcc2f17f4731e5eda Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:32:22 +0800 Subject: [PATCH 086/171] drm/amdgpu: Initialize dce_virtual_crtc_funcs. (v2) For virtual display feature, initialize dce_virtual_crtc_funcs. 
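One detail worth noting in the diff below: the gamma callback shifts each colour component right by 6 because userspace hands the driver 16-bit values while the LUT cached in amdgpu_crtc holds 10-bit entries (the crtc init path fills it with i << 2 for the same reason). Reduced to a one-line sketch:

static u16 gamma_to_lut(u16 component)
{
	return component >> 6; /* 16-bit userspace value -> 10-bit LUT entry */
}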
v2: agd: rebase on upstream Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 32 +++++++++++++++++++++--- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 11f04ec51c9c..d6802be0ea39 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -125,13 +125,37 @@ static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) return; } +static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, + u16 *green, u16 *blue, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int i; + + /* userspace palettes are always correct as is */ + for (i = 0; i < size; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + + return 0; +} + +static void dce_virtual_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(amdgpu_crtc); +} + static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { .cursor_set2 = NULL, .cursor_move = NULL, - .gamma_set = NULL, - .set_config = NULL, - .destroy = NULL, - .page_flip = NULL, + .gamma_set = dce_virtual_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_virtual_crtc_destroy, + .page_flip = amdgpu_crtc_page_flip, }; static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) From 0bad1619ff5cce53964785a9faedcfb1d4810223 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:32:35 +0800 Subject: [PATCH 087/171] drm/amdgpu: Detect whether the GPU has a DCE engine. For the virtual display feature, when the GPU has a DCE engine, the VGA render and the CRTCs need to be disabled, or the GPU will hang when initializing the GMC. So first detect whether the GPU has a DCE engine.
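Boiled down, the detection rule the diff below implements is: parse the VBIOS Object_Header data table and report a DCE engine only when the table's content revision is at least 2 and it lists at least one display path. A condensed sketch of that decision, with illustrative names:

static bool vbios_reports_dce(u8 content_rev, u8 num_display_paths)
{
	/* older table revisions carry no usable display path info */
	if (content_rev < 2)
		return false;
	return num_display_paths > 0;
}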
Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 27 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 2 ++ 2 files changed, 29 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 983175363b06..1b621160b52e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -259,6 +259,33 @@ static const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown }; +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct atom_context *ctx = mode_info->atom_context; + int index = GetIndexIntoMasterTable(DATA, Object_Header); + u16 size, data_offset; + u8 frev, crev; + ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; + ATOM_OBJECT_HEADER *obj_header; + + if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) + return false; + + if (crev < 2) + return false; + + obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); + path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) + (ctx->bios + data_offset + + le16_to_cpu(obj_header->usDisplayPathTableOffset)); + + if (path_obj->ucNumOfDispPath) + return true; + else + return false; +} + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev) { struct amdgpu_mode_info *mode_info = &adev->mode_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8c2e69661799..15dd43ec38bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device * uint8_t id); void amdgpu_atombios_i2c_init(struct amdgpu_device *adev); +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev); + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev); int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); From 83c9b0253b1136b1312fd2a0bfd173f625c65091 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:33:11 +0800 Subject: [PATCH 088/171] drm/amdgpu: Disable VGA render and CRTCs when initializing the GMC. For the virtual display feature, when the GPU has a DCE engine, the VGA render and the CRTCs need to be disabled, or the GPU will hang when initializing the GMC.
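The per-CRTC disable sequence repeated for each DCE generation in the diff below follows a lock-modify-unlock register pattern; condensed into a single hypothetical helper (the register names and macros are the ones the patch itself uses, offset being the CRTC's register offset):

static void disable_one_crtc(struct amdgpu_device *adev, u32 offset)
{
	u32 tmp;

	WREG32(mmCRTC_UPDATE_LOCK + offset, 1);   /* hold double-buffered updates */
	tmp = RREG32(mmCRTC_CONTROL + offset);
	tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
	WREG32(mmCRTC_CONTROL + offset, tmp);     /* turn the CRTC master off */
	WREG32(mmCRTC_UPDATE_LOCK + offset, 0);   /* release the update lock */
}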
Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 42 ++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 2 + drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 53 ++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 2 + drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 51 +++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 2 + drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 32 ++++++++++++-- 7 files changed, 173 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 7f642b57223e..b888d7223e8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -712,6 +712,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + num_crtc = 6; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v10_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v10_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2962,10 +3001,11 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_display_funcs(adev); dce_v10_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: - adev->mode_info.num_crtc = 6; /* XXX 7??? 
*/ adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index 1bfa48ddd8a6..e3dc04d293e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v10_0_ip_funcs; +void dce_v10_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d4bf133908b1..cfadd79d2580 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -673,6 +673,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_CARRIZO: + num_crtc = 3; + break; + case CHIP_STONEY: + num_crtc = 2; + break; + case CHIP_POLARIS10: + num_crtc = 6; + break; + case CHIP_POLARIS11: + num_crtc = 5; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v11_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v11_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2999,24 +3046,22 @@ static int dce_v11_0_early_init(void *handle) dce_v11_0_set_display_funcs(adev); dce_v11_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_STONEY: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_POLARIS10: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_POLARIS11: - adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 84e4618f5253..1f58a65ba2ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v11_0_ip_funcs; +void dce_v11_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4fdfab1e9200..e4467b8ff40c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -604,6 +604,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + num_crtc = 6; + 
break; + case CHIP_KAVERI: + num_crtc = 4; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + num_crtc = 2; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v8_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v8_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v8_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2803,21 +2849,20 @@ static int dce_v8_0_early_init(void *handle) dce_v8_0_set_display_funcs(adev); dce_v8_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_KAVERI: - adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_KABINI: case CHIP_MULLINS: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; /* ? */ break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 77016852b252..7d0770c3a49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v8_0_ip_funcs; +void dce_v8_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index d6802be0ea39..c7da45c2c8fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -25,11 +25,13 @@ #include "amdgpu_pm.h" #include "amdgpu_i2c.h" #include "atom.h" -#include "amdgpu_atombios.h" -#include "atombios_crtc.h" -#include "atombios_encoders.h" #include "amdgpu_pll.h" #include "amdgpu_connectors.h" +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "dce_v8_0.h" +#endif +#include "dce_v10_0.h" +#include "dce_v11_0.h" static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); @@ -98,6 +100,30 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) void dce_virtual_stop_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#ifdef CONFIG_DRM_AMDGPU_CIK + dce_v8_0_disable_dce(adev); +#endif + break; + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + default: + DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + } + return; } void dce_virtual_resume_mc_access(struct amdgpu_device *adev, From 46ac3622437692c371f3e647dc29f99e14b4f596 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:35:39 +0800 
Subject: [PATCH 089/171] drm/amdgpu: Use software timer to generate vsync interrupt. For virtual display feature, use the software timer to simulate the vsync interrupt. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 4 +++ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 33 ++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/dce_virtual.h | 2 ++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 6b1d7d306564..b1ae33bdbd84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -39,6 +39,8 @@ #include #include #include +#include +#include "amdgpu_irq.h" struct amdgpu_bo; struct amdgpu_device; @@ -339,6 +341,8 @@ struct amdgpu_mode_info { int num_dig; /* number of dig blocks */ int disp_priority; const struct amdgpu_display_funcs *funcs; + struct hrtimer vblank_timer; + enum amdgpu_interrupt_state vsync_timer_enabled; }; #define AMDGPU_MAX_BL_LEVEL 0xFF diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index c7da45c2c8fe..ace52a342e64 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -32,6 +32,7 @@ #endif #include "dce_v10_0.h" #include "dce_v11_0.h" +#include "dce_virtual.h" static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); @@ -642,16 +643,44 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) adev->mode_info.funcs = &dce_virtual_display_funcs; } +static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) +{ + struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); + struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); + unsigned crtc = 0; + adev->ddev->vblank[0].count++; + drm_handle_vblank(adev->ddev, crtc); + hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) + int crtc, + enum amdgpu_interrupt_state state) { if (crtc >= adev->mode_info.num_crtc) { DRM_DEBUG("invalid crtc %d\n", crtc); return; } + + if (state && !adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Enable software vsync timer\n"); + hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); + adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; + hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + } else if (!state && adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Disable software vsync timer\n"); + hrtimer_cancel(&adev->mode_info.vblank_timer); + } + + if (!state || (state && !adev->mode_info.vsync_timer_enabled)) + adev->ddev->vblank[0].count = 0; + adev->mode_info.vsync_timer_enabled = state; + DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); } + static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h index 
d205d7f6f6ab..e239243f6ebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h @@ -25,5 +25,7 @@ #define __DCE_VIRTUAL_H__ extern const struct amd_ip_funcs dce_virtual_ip_funcs; +#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 + #endif From 6b5084ccfcd4d43d31c6124968dcc537ea265994 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:36:07 +0800 Subject: [PATCH 090/171] drm/amdgpu: Call pageflip irq function when vsync interrupt is received. For the virtual display feature, as there is no DCE engine, no pageflip irq is generated. So directly call the pageflip irq function when the vsync interrupt is received. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index ace52a342e64..1c16983ec8b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -36,6 +36,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); /** * dce_virtual_vblank_wait - vblank wait asic callback. @@ -650,6 +653,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla unsigned crtc = 0; adev->ddev->vblank[0].count++; drm_handle_vblank(adev->ddev, crtc); + dce_virtual_pageflip_irq(adev, NULL, NULL); hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); return HRTIMER_NORESTART; } @@ -706,8 +710,8 @@ static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, } static int dce_virtual_crtc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) { unsigned crtc = 0; unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; @@ -718,7 +722,7 @@ static int dce_virtual_crtc_irq(struct amdgpu_device *adev, if (amdgpu_irq_enabled(adev, source, irq_type)) { drm_handle_vblank(adev->ddev, crtc); } - + dce_virtual_pageflip_irq(adev, NULL, NULL); DRM_DEBUG("IH: D%d vblank\n", crtc + 1); return 0; } From 048a5b76d2ba40adedb98a987bb15a9cc1f0a62b Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:36:31 +0800 Subject: [PATCH 091/171] drm/amdgpu: Add DRM_MODE_CONNECTOR_VIRTUAL connector in amdgpu_connector_add. For the virtual display feature, add the virtual connector type in amdgpu_connector_add.
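Since a virtual connector has no DDC bus and nothing physical to probe, the helpers added in the diff below mostly reduce to constants; the essence can be sketched in two callbacks (a simplification of the patch, with shortened names):

/* always report the virtual output as present... */
static enum drm_connector_status
virt_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}

/* ...and accept any mode, since no real timing hardware constrains us */
static int virt_mode_valid(struct drm_connector *connector,
			   struct drm_display_mode *mode)
{
	return MODE_OK;
}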
Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_connectors.c | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ff0b55a65ca3..319a5e1d9389 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1504,6 +1504,86 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .force = amdgpu_connector_dvi_force, }; +static struct drm_encoder * +amdgpu_connector_virtual_encoder(struct drm_connector *connector) +{ + int enc_id = connector->encoder_ids[0]; + struct drm_encoder *encoder; + int i; + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; + + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) + continue; + + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + return encoder; + } + + /* pick the first one */ + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); + return NULL; +} + +static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) +{ + struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); + + if (encoder) { + amdgpu_connector_add_common_modes(encoder, connector); + } + + return 0; +} + +static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) +{ + return 0; +} + +static enum drm_connector_status + +amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +int amdgpu_connector_virtual_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void amdgpu_connector_virtual_force(struct drm_connector *connector) +{ + return; +} + +static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = { + .get_modes = amdgpu_connector_virtual_get_modes, + .mode_valid = amdgpu_connector_virtual_mode_valid, + .best_encoder = amdgpu_connector_virtual_encoder, +}; + +static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = { + .dpms = amdgpu_connector_virtual_dpms, + .detect = amdgpu_connector_virtual_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = amdgpu_connector_virtual_set_property, + .destroy = amdgpu_connector_destroy, + .force = amdgpu_connector_virtual_force, +}; + void amdgpu_connector_add(struct amdgpu_device *adev, uint32_t connector_id, @@ -1888,6 +1968,17 @@ amdgpu_connector_add(struct amdgpu_device *adev, connector->interlace_allowed = false; connector->doublescan_allowed = false; break; + case DRM_MODE_CONNECTOR_VIRTUAL: + amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); + if (!amdgpu_dig_connector) + goto failed; + amdgpu_connector->con_priv = amdgpu_dig_connector; + drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type); + drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs); + subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + break; } } From e9ed3a67cd1bfd8d0d0dc4968a36f6ea4db2d45a Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 
Aug 2016 11:36:45 +0800 Subject: [PATCH 092/171] drm/amdgpu: Define virtual display ip blocks. For virtual display feature, define virtual display ip blocks, and set dce_virtual_ip_funcs to DCE block. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 341 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/vi.c | 282 +++++++++++++++++++++++++ 2 files changed, 623 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4efc901f658c..edcc14287d2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -67,6 +67,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_powerplay.h" +#include "dce_virtual.h" /* * Indirect registers accessor @@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 3, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kabini_ip_blocks[] = { /* ORDER MATTERS! 
*/ @@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version mullins_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1980,6 +2253,74 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
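The blocks are initialized in array index order, so the support blocks (COMMON, GMC, IH, SMC) must come up before DCE and the engine blocks (GFX, SDMA, UVD, VCE).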
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + int cik_set_ip_blocks(struct amdgpu_device *adev) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 8f37066adcee..ff78b5a36143 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -77,6 +77,7 @@ #if defined(CONFIG_DRM_AMD_ACP) #include "amdgpu_acp.h" #endif +#include "dce_virtual.h" MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); @@ -890,6 +891,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &uvd_v5_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version fiji_ip_blocks[] = { /* ORDER MATTERS! */ @@ -958,6 +1027,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
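Apart from the DCE entry, which points at dce_virtual_ip_funcs instead of the hardware display code, this mirrors kaveri_ip_blocks above.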
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1026,6 +1163,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 3, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 4, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1103,6 +1308,83 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = #endif }; +static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
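Same layout as cz_ip_blocks, with the DCE 11.0 entry redirected to the virtual DCE; the optional ACP block remains guarded by CONFIG_DRM_AMD_ACP.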
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &cz_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif +}; + int vi_set_ip_blocks(struct amdgpu_device *adev) { switch (adev->asic_type) { From e443059d0f41fcc07f0fb6b3b8ae96dc3d2364c7 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:37:29 +0800 Subject: [PATCH 093/171] drm/amdgpu: Define one variable for virtual display. For the virtual display feature, define one variable in amdgpu.ko. To enable the feature, set the option "amdgpu.virtual_display=1"; the driver will then disable the VGA render and CRTCs if the ASIC has a DCE engine. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12112cc65dae..54f71565e6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -92,6 +92,7 @@ extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; extern char *amdgpu_disable_cu; extern int amdgpu_sclk_deep_sleep_en; +extern int amdgpu_virtual_display; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 44fda31c75d2..421dbbfd5e1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -90,6 +90,7 @@ unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; char *amdgpu_disable_cu = NULL; +int amdgpu_virtual_display = 0; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -189,6 +190,9 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); +MODULE_PARM_DESC(virtual_display, "enable virtual display (0 = disable virtual display)"); +module_param_named(virtual_display, amdgpu_virtual_display, int, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ From a6be7570518f85ce94ca9d6540543e00725828d3 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 8 Aug 2016 11:37:50 +0800 Subject: [PATCH 094/171] drm/amdgpu: 
Set ip_blocks according to variable amdgpu_virtual_display. For the virtual display feature, if the user sets the option "amdgpu.virtual_display=1" when loading amdgpu.ko, the ip_blocks need to be set to the virtual display IP blocks. When virtual display is enabled, amdgpu_dal also needs to be set to zero. Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 + drivers/gpu/drm/amd/amdgpu/cik.c | 77 +++++++++++++------- drivers/gpu/drm/amd/amdgpu/vi.c | 84 +++++++++++++++------- 3 files changed, 113 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 72d5d094965d..4bf9bd96a46e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1185,6 +1185,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev) { int i, r; + DRM_INFO("virtual display enabled:%d\n", amdgpu_virtual_display); + switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index edcc14287d2d..15200b1b77d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -2323,30 +2323,59 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = int cik_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->ip_blocks = bonaire_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); - break; - case CHIP_HAWAII: - adev->ip_blocks = hawaii_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); - break; - case CHIP_KAVERI: - adev->ip_blocks = kaveri_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); - break; - case CHIP_KABINI: - adev->ip_blocks = kabini_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); - break; - case CHIP_MULLINS: - adev->ip_blocks = mullins_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (amdgpu_virtual_display) { + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks; + adev->num_ip_blocks = 
ARRAY_SIZE(mullins_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index ff78b5a36143..064291771d79 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1387,32 +1387,64 @@ static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = int vi_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_TOPAZ: - adev->ip_blocks = topaz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); - break; - case CHIP_FIJI: - adev->ip_blocks = fiji_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); - break; - case CHIP_TONGA: - adev->ip_blocks = tonga_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - adev->ip_blocks = polaris11_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->ip_blocks = cz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (amdgpu_virtual_display) { + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); + break; + + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; From d06b7e1cf0fa4552fd725111b46df1ed0b649b15 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Aug 2016 14:35:55 -0400 Subject: [PATCH 095/171] drm/amdgpu: move vsync_timer_enabled setup to dce virtual early_init Put it in one place. 
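Every virtual display configuration passes through dce_virtual_early_init(), so the duplicated per-ASIC assignments in cik.c and vi.c can simply be dropped.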
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 1 - drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 + drivers/gpu/drm/amd/amdgpu/vi.c | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 15200b1b77d6..e539b28dbef7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -2324,7 +2324,6 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = int cik_set_ip_blocks(struct amdgpu_device *adev) { if (amdgpu_virtual_display) { - adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; switch (adev->asic_type) { case CHIP_BONAIRE: adev->ip_blocks = bonaire_ip_blocks_vd; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 1c16983ec8b6..4c8ca58a3f94 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -353,6 +353,7 @@ static int dce_virtual_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; dce_virtual_set_display_funcs(adev); dce_virtual_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 064291771d79..2d3e3ed84dcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1388,7 +1388,6 @@ static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = int vi_set_ip_blocks(struct amdgpu_device *adev) { if (amdgpu_virtual_display) { - adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; switch (adev->asic_type) { case CHIP_TOPAZ: adev->ip_blocks = topaz_ip_blocks; From 2579de4394309da473d89cd5df08dc0bfe5e87f5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Aug 2016 14:40:04 -0400 Subject: [PATCH 096/171] drm/amdgpu/virtual_dce: add case for topaz for disable_dce This asic has no DCE block. Also clarify the error message for unmatched chips. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 4c8ca58a3f94..5499693f1dcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -124,8 +124,11 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev, case CHIP_POLARIS10: dce_v11_0_disable_dce(adev); break; + case CHIP_TOPAZ: + /* no DCE */ + return; default: - DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); } return; From 4f4b78341b658cfa0ca7b1587f5540c21f9db997 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 8 Aug 2016 14:45:29 -0400 Subject: [PATCH 097/171] drm/amdgpu: add virtual dce support for iceland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 58 +++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 2d3e3ed84dcb..69c2f8175e68 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -823,6 +823,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
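Topaz has no hardware DCE, UVD or VCE, so the DCE slot is purely virtual and the table ends at SDMA.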
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 4, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &iceland_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &sdma_v2_4_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version tonga_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1390,8 +1444,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) if (amdgpu_virtual_display) { switch (adev->asic_type) { case CHIP_TOPAZ: - adev->ip_blocks = topaz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + adev->ip_blocks = topaz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); break; case CHIP_FIJI: adev->ip_blocks = fiji_ip_blocks_vd; From 9accf2fd33e969862c55be0c20dbfb9b0890bbb8 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 10 Aug 2016 16:01:25 +0800 Subject: [PATCH 098/171] drm/amdgpu: Change the virtual_display type from int to char*. For the virtual display feature, there may be multiple GPUs in a system, so let the user choose which GPUs the feature is enabled on: change the type of virtual_display from int to char*. The variable is set to a semicolon-separated list of PCI addresses, like virtual_display="xxxx:xx:xx.x;xxxx:xx:xx.x;". Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29 +++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6 ++--- drivers/gpu/drm/amd/amdgpu/cik.c | 2 +- drivers/gpu/drm/amd/amdgpu/vi.c | 2 +- 5 files changed, 35 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 54f71565e6cc..46f250c80a31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -92,7 +92,7 @@ extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; extern char *amdgpu_disable_cu; extern int amdgpu_sclk_deep_sleep_en; -extern int amdgpu_virtual_display; +extern char *amdgpu_virtual_display; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -2072,6 +2072,7 @@ struct amdgpu_device { atomic_t gpu_reset_counter; /* display */ + bool enable_virtual_display; struct amdgpu_mode_info mode_info; struct work_struct hotplug_work; struct amdgpu_irq_src crtc_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4bf9bd96a46e..3751d262de68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -46,6 +46,7 @@ #endif #include "vi.h" #include "bif/bif_4_1_d.h" +#include <linux/pci.h> static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); @@ -1181,11 +1182,37 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, return 1; } +static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev) +{ + adev->enable_virtual_display = 
false; + + if (amdgpu_virtual_display) { + struct drm_device *ddev = adev->ddev; + const char *pci_address_name = pci_name(ddev->pdev); + char *pciaddstr, *pciaddstr_tmp, *pciaddname; + + pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); + pciaddstr_tmp = pciaddstr; + while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) { + if (!strcmp(pci_address_name, pciaddname)) { + adev->enable_virtual_display = true; + break; + } + } + + DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n", + amdgpu_virtual_display, pci_address_name, + adev->enable_virtual_display); + + kfree(pciaddstr); + } +} + static int amdgpu_early_init(struct amdgpu_device *adev) { int i, r; - DRM_INFO("virtual display enabled:%d\n", amdgpu_virtual_display); + amdgpu_whether_enable_virtual_display(adev); switch (adev->asic_type) { case CHIP_TOPAZ: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 421dbbfd5e1b..58b1db82f589 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -90,7 +90,7 @@ unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; char *amdgpu_disable_cu = NULL; -int amdgpu_virtual_display = 0; +char *amdgpu_virtual_display = NULL; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -190,8 +190,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); -MODULE_PARM_DESC(virtual_display, "enable virtual display (0 = disable virtual display)"); -module_param_named(virtual_display, amdgpu_virtual_display, int, 0444); +MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)"); +module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index e539b28dbef7..825de800b798 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -2323,7 +2323,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = int cik_set_ip_blocks(struct amdgpu_device *adev) { - if (amdgpu_virtual_display) { + if (adev->enable_virtual_display) { switch (adev->asic_type) { case CHIP_BONAIRE: adev->ip_blocks = bonaire_ip_blocks_vd; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 69c2f8175e68..f2e8aa1a0dbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1441,7 +1441,7 @@ static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = int vi_set_ip_blocks(struct amdgpu_device *adev) { - if (amdgpu_virtual_display) { + if (adev->enable_virtual_display) { switch (adev->asic_type) { case CHIP_TOPAZ: adev->ip_blocks = topaz_ip_blocks_vd; From 62d2ce4b09f62c4be62ba7b2cab81d164403bf85 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 9 Aug 2016 10:07:36 -0400 Subject: [PATCH 099/171] drm/amd/amdgpu: Correct whitespace in GFX v8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix various whitespace issues in gfx v8 driver. 
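(A few dev_warn() strings in the RLC buffer paths are made consistent along the way.)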
Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 41 +++++++++++---------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 7e8cddb6d512..ac29fe6f785c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1232,10 +1232,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) if (adev->gfx.rlc.clear_state_obj) { r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } @@ -1247,7 +1246,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); adev->gfx.rlc.cp_table_obj = NULL; } @@ -1289,14 +1287,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.clear_state_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } @@ -1331,7 +1329,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.cp_table_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -1344,7 +1342,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - } return 0; @@ -1360,7 +1357,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); adev->gfx.mec.hpd_eop_obj = NULL; } @@ -2123,9 +2119,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v8_0_mec_fini(adev); - gfx_v8_0_rlc_fini(adev); - gfx_v8_0_free_microcode(adev); return 0; @@ -3581,7 +3575,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev); gfx_v8_0_get_cu_info(adev); @@ -3994,14 +3987,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32(mmRLC_CGCG_CGLS_CTRL, 0); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) + adev->asic_type == CHIP_POLARIS10) WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); gfx_v8_0_rlc_reset(adev); - 
gfx_v8_0_init_pg(adev); if (!adev->pp_enabled) { @@ -4976,7 +4968,6 @@ static int gfx_v8_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); - gfx_v8_0_gpu_init(adev); r = gfx_v8_0_rlc_resume(adev); @@ -5548,15 +5539,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, data = RREG32(mmRLC_SERDES_WR_CTRL); if (adev->asic_type == CHIP_STONEY) - data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | - RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | - RLC_SERDES_WR_CTRL__P1_SELECT_MASK | - RLC_SERDES_WR_CTRL__P2_SELECT_MASK | - RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | - RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | - RLC_SERDES_WR_CTRL__POWER_UP_MASK | - RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | - RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); else data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | @@ -6089,9 +6080,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN - (2 << 0) | + (2 << 0) | #endif - (ib->gpu_addr & 0xFFFFFFFC)); + (ib->gpu_addr & 0xFFFFFFFC)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); amdgpu_ring_write(ring, control); } From 5003f2785a20d9caf4040d9d9039644f6ee41892 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 9 Aug 2016 10:10:30 -0400 Subject: [PATCH 100/171] drm/amd/amdgpu: Simplify various gfx v8 functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 61 +++++++++------------------ 1 file changed, 20 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ac29fe6f785c..f0a813c57164 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3455,19 +3455,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32(mmGRBM_GFX_INDEX, data); } @@ -3480,11 +3477,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = 
RREG32(mmCC_RB_BACKEND_DISABLE); - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se); @@ -4288,12 +4284,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_start(adev); ring->ready = true; r = amdgpu_ring_test_ring(ring); - if (r) { + if (r) ring->ready = false; - return r; - } - return 0; + return r; } static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) @@ -4975,8 +4969,6 @@ static int gfx_v8_0_hw_init(void *handle) return r; r = gfx_v8_0_cp_resume(adev); - if (r) - return r; return r; } @@ -5024,15 +5016,12 @@ static bool gfx_v8_0_is_idle(void *handle) static int gfx_v8_0_wait_for_idle(void *handle) { unsigned i; - u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; - - if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + if (gfx_v8_0_is_idle(handle)) return 0; + udelay(1); } return -ETIMEDOUT; @@ -5963,25 +5952,18 @@ static int gfx_v8_0_set_clockgating_state(void *handle, static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; if (ring->use_doorbell) /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; + return ring->adev->wb.wb[ring->wptr_offs]; else - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -6591,15 +6573,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - return (~data) & mask; + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) From 61cb8cef526d238de2ff278d9e562a6239d8f98a Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 9 Aug 2016 10:13:21 -0400 Subject: [PATCH 101/171] drm/amd/amdgpu: Simplify bitfield operations in gfx v8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch introduces a new macro WREG32_FIELD which is used to write to a register with a new value in a field. It's designed to replace the pattern: tmp = RREG32(mmFoo); tmp &= ~REG__FIELD_MASK; tmp |= new_value << REG__FIELD__SHIFT; WREG32(mmFoo, tmp) with: WREG32_FIELD(Foo, FIELD, new_value); Unlike WREG32_P() it understands offsets/masks and doesn't require the caller to shift the value (or mask properly). It's applied where suitable in the gfx_v8_0.c driver to start with. 
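A concrete sketch, using RLC_CNTL.RLC_ENABLE_F32 (one of the fields converted below) and assuming the usual generated register headers that define RLC_CNTL__RLC_ENABLE_F32_MASK and RLC_CNTL__RLC_ENABLE_F32__SHIFT: WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1); expands to roughly: WREG32(mmRLC_CNTL, (RREG32(mmRLC_CNTL) & ~RLC_CNTL__RLC_ENABLE_F32_MASK) | (1 << RLC_CNTL__RLC_ENABLE_F32__SHIFT));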
Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 273 +++++--------------------- 2 files changed, 47 insertions(+), 229 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 46f250c80a31..5a4af87bb209 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2210,6 +2210,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define REG_GET_FIELD(value, reg, field) \ (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) +#define WREG32_FIELD(reg, field, val) \ + WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + /* * BIOS helpers. */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f0a813c57164..6022208ead31 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3562,10 +3562,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) u32 tmp; int i; - tmp = RREG32(mmGRBM_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); - WREG32(mmGRBM_CNTL, tmp); - + WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF); WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); @@ -3754,9 +3751,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) sizeof(indirect_start_offsets)/sizeof(int)); /* save and restore list */ - temp = RREG32(mmRLC_SRM_CNTL); - temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; - WREG32(mmRLC_SRM_CNTL, temp); + WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1); WREG32(mmRLC_SRM_ARAM_ADDR, 0); for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) @@ -3793,11 +3788,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) { - uint32_t data; - - data = RREG32(mmRLC_SRM_CNTL); - data |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(mmRLC_SRM_CNTL, data); + WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1); } static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) @@ -3807,75 +3798,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG)) { - data = RREG32(mmCP_RB_WPTR_POLL_CNTL); - data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; - data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); - WREG32(mmCP_RB_WPTR_POLL_CNTL, data); + WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60); - data = 0; - data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); + data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10); WREG32(mmRLC_PG_DELAY, data); - data = RREG32(mmRLC_PG_DELAY_2); - data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; - data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY_2, data); - - data = RREG32(mmRLC_AUTO_PG_CTRL); - 
data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; - data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); - WREG32(mmRLC_AUTO_PG_CTRL, data); + WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3); + WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0); } } static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0); } static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0); } static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; - else - data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0); } static void gfx_v8_0_init_pg(struct amdgpu_device *adev) @@ -3914,34 +3864,24 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v8_0_enable_gui_idle_interrupt(adev, false); - gfx_v8_0_wait_for_rlc_serdes(adev); } static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmGRBM_SOFT_RESET); - - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - WREG32(mmGRBM_SOFT_RESET, tmp); + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - WREG32(mmGRBM_SOFT_RESET, tmp); + + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1); /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) @@ -5367,8 +5307,6 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - if (adev->asic_type == CHIP_POLARIS11) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, @@ -5376,83 +5314,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable static MGPG */ - if (enable) - data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 
1 : 0); } static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable dynamic MGPG */ - if (enable) - data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable quick PG */ - if (enable) - data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0); /* Read any GFX register to wake up GFX. */ if (!enable) - data = RREG32(mmDB_RENDER_CONTROL); + RREG32(mmDB_RENDER_CONTROL); } static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, @@ -5559,10 +5449,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define MSG_ENTER_RLC_SAFE_MODE 1 #define MSG_EXIT_RLC_SAFE_MODE 0 - -#define RLC_GPR_REG2__REQ_MASK 0x00000001 -#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 -#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e +#define RLC_GPR_REG2__REQ_MASK 0x00000001 +#define RLC_GPR_REG2__REQ__SHIFT 0 +#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 +#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) { @@ -5592,7 +5482,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5620,7 +5510,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5652,7 +5542,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5679,7 +5569,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5720,21 +5610,12 @@ static void 
gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) /* 1 - RLC memory Light sleep */ - temp = data = RREG32(mmRLC_MEM_SLP_CNTL); - data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmRLC_MEM_SLP_CNTL, data); - } + WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1); - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - /* 2 - CP memory Light sleep */ - temp = data = RREG32(mmCP_MEM_SLP_CNTL); - data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmCP_MEM_SLP_CNTL, data); - } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) + WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1); } /* 3 - RLC_CGTT_MGCG_OVERRIDE */ @@ -6209,33 +6090,14 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = - REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state) { - u32 mec_int_cntl, mec_int_cntl_reg; - /* * amdgpu controls only pipe 0 of MEC1. That's why this function only * handles the setting of interrupts for this specific pipe. All other @@ -6245,7 +6107,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -6256,22 +6117,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, return; } - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 
0 : 1); } static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, @@ -6279,24 +6126,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } @@ -6306,24 +6137,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } From 3a8a6ab4a59fdc812fe020d97726ff925b341ec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 3 Aug 2016 17:39:26 +0200 Subject: [PATCH 102/171] drm/amdgpu: remove unused VM defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used for a long time. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5a4af87bb209..3b7330c3825f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -829,8 +829,6 @@ struct amdgpu_ring { /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1) -#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK) #define AMDGPU_PTE_VALID (1 << 0) #define AMDGPU_PTE_SYSTEM (1 << 1) From 1303c73c61fea8cc5509e5b0e3cbe5253e260ca1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 3 Aug 2016 17:46:42 +0200 Subject: [PATCH 103/171] drm/amdgpu: cleanup VM fragment defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can actually do way more than just the 64KB we currently used as default. 
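The fragment field is a LOG2 page count, so AMDGPU_PTE_FRAG(4) = (4 << 7) still encodes 2^4 = 16 contiguous 4KB pages, i.e. exactly the old AMDGPU_PTE_FRAG_64KB value, while bigger fragments such as AMDGPU_PTE_FRAG(9) (2MB) no longer need their own define.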
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3b7330c3825f..3d128c16334d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -830,6 +830,9 @@ struct amdgpu_ring { /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 +/* LOG2 number of continuous pages for the fragment field */ +#define AMDGPU_LOG2_PAGES_PER_FRAG 4 + #define AMDGPU_PTE_VALID (1 << 0) #define AMDGPU_PTE_SYSTEM (1 << 1) #define AMDGPU_PTE_SNOOPED (1 << 2) @@ -840,10 +843,7 @@ struct amdgpu_ring { #define AMDGPU_PTE_READABLE (1 << 5) #define AMDGPU_PTE_WRITEABLE (1 << 6) -/* PTE (Page Table Entry) fragment field for different page sizes */ -#define AMDGPU_PTE_FRAG_4KB (0 << 7) -#define AMDGPU_PTE_FRAG_64KB (4 << 7) -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 +#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) /* How to programm VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e642fc48df4..ac209a51772b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -739,7 +739,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, */ /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; + uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); uint64_t frag_align = 0x80; uint64_t frag_start = ALIGN(pe_start, frag_align); From 29efc4f5dfe47e992b04f92c4a4d990d03816e78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 4 Aug 2016 14:52:50 +0200 Subject: [PATCH 104/171] drm/amdgpu: rename amdgpu_vm_update_params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Well those are actually page table entry parameters. This also makes the variable names used a bit shorter. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 91 +++++++++++++------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ac209a51772b..577abfd3879e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -57,7 +57,7 @@ /* Local structure. Encapsulate some VM table update parameters to reduce * the number of function parameters */ -struct amdgpu_vm_update_params { +struct amdgpu_pte_update_params { /* address where to copy page table entries from */ uint64_t src; /* DMA addresses to use for mapping */ @@ -470,7 +470,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * amdgpu_vm_update_pages - helper to call the right asic function * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -481,29 +481,28 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * to setup the page table using the DMA. 
*/ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, + struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if (vm_update_params->src) { - amdgpu_vm_copy_pte(adev, vm_update_params->ib, - pe, (vm_update_params->src + (addr >> 12) * 8), count); + if (params->src) { + amdgpu_vm_copy_pte(adev, params->ib, + pe, (params->src + (addr >> 12) * 8), count); - } else if (vm_update_params->pages_addr) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, - vm_update_params->pages_addr, + } else if (params->pages_addr) { + amdgpu_vm_write_pte(adev, params->ib, + params->pages_addr, pe, addr, count, incr, flags); } else if (count < 3) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, + amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr, count, incr, flags); } else { - amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, + amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr, count, incr, flags); } } @@ -523,12 +522,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; unsigned entries; uint64_t addr; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); + memset(¶ms, 0, sizeof(params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -546,8 +545,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - vm_update_params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, + params.ib = &job->ibs[0]; + amdgpu_vm_update_pages(adev, ¶ms, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -620,12 +619,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *fence = NULL; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); + memset(¶ms, 0, sizeof(params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
*/ @@ -638,7 +637,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - vm_update_params.ib = &job->ibs[0]; + params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -658,7 +657,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, &vm_update_params, + amdgpu_vm_update_pages(adev, ¶ms, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); @@ -673,15 +672,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, &vm_update_params, + amdgpu_vm_update_pages(adev, ¶ms, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); - if (vm_update_params.ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, vm_update_params.ib); + if (params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(vm_update_params.ib->length_dw > ndw); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -707,15 +706,14 @@ error_free: * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags */ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, + struct amdgpu_pte_update_params *params, uint64_t pe_start, uint64_t pe_end, uint64_t addr, uint32_t flags) { @@ -752,11 +750,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, return; /* system pages are non continuously */ - if (vm_update_params->src || vm_update_params->pages_addr || + if (params->src || params->pages_addr || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, + amdgpu_vm_update_pages(adev, params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); return; @@ -765,21 +763,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, + amdgpu_vm_update_pages(adev, params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, + amdgpu_vm_update_pages(adev, params, frag_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, + amdgpu_vm_update_pages(adev, params, frag_end, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); } } @@ -788,7 +786,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * amdgpu_vm_update_ptes - make sure that page tables are valid * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: 
start of GPU address range * @end: end of GPU address range @@ -798,8 +796,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * Update the page tables in the range @start - @end. */ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, + struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) @@ -852,7 +849,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, */ cur_pe_end += 8 * nptes; } else { - amdgpu_vm_frag_ptes(adev, vm_update_params, + amdgpu_vm_frag_ptes(adev, params, cur_pe_start, cur_pe_end, cur_dst, flags); @@ -866,7 +863,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, + amdgpu_vm_frag_ptes(adev, params, cur_pe_start, cur_pe_end, cur_dst, flags); } @@ -900,14 +897,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *f = NULL; int r; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - memset(&vm_update_params, 0, sizeof(vm_update_params)); - vm_update_params.src = src; - vm_update_params.pages_addr = pages_addr; + memset(¶ms, 0, sizeof(params)); + params.src = src; + params.pages_addr = pages_addr; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -924,11 +921,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if (vm_update_params.src) { + if (params.src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (vm_update_params.pages_addr) { + } else if (params.pages_addr) { /* header for write data commands */ ndw += ncmds * 4; @@ -947,7 +944,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) return r; - vm_update_params.ib = &job->ibs[0]; + params.ib = &job->ibs[0]; r = amdgpu_sync_fence(adev, &job->sync, exclusive); if (r) @@ -962,11 +959,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, + amdgpu_vm_update_ptes(adev, ¶ms, vm, start, last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, vm_update_params.ib); - WARN_ON(vm_update_params.ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, params.ib); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) From 27c5f36fe138e29d63eea7d1445bda1ca64921d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 4 Aug 2016 15:02:49 +0200 Subject: [PATCH 105/171] drm/amdgpu: add adev to the pte_update_params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to carry that forward as a separate parameter. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 65 ++++++++++++-------------- 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 577abfd3879e..fd7901c1320f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -58,6 +58,8 @@ * the number of function parameters */ struct amdgpu_pte_update_params { + /* amdgpu device we do this update for */ + struct amdgpu_device *adev; /* address where to copy page table entries from */ uint64_t src; /* DMA addresses to use for mapping */ @@ -469,7 +471,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, /** * amdgpu_vm_update_pages - helper to call the right asic function * - * @adev: amdgpu_device pointer * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe @@ -480,8 +481,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ -static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_pte_update_params *params, +static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) @@ -489,20 +489,20 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); if (params->src) { - amdgpu_vm_copy_pte(adev, params->ib, + amdgpu_vm_copy_pte(params->adev, params->ib, pe, (params->src + (addr >> 12) * 8), count); } else if (params->pages_addr) { - amdgpu_vm_write_pte(adev, params->ib, + amdgpu_vm_write_pte(params->adev, params->ib, params->pages_addr, pe, addr, count, incr, flags); } else if (count < 3) { - amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr, + amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr, count, incr, flags); } else { - amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr, + amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, count, incr, flags); } } @@ -527,7 +527,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - memset(¶ms, 0, sizeof(params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -545,9 +544,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(adev, ¶ms, addr, 0, entries, - 0, 0); + amdgpu_vm_update_pages(¶ms, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); @@ -624,7 +624,6 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, int r; - memset(¶ms, 0, sizeof(params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
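The pattern boils down to stashing a back-pointer in the parameter block; a minimal standalone sketch (all names here are illustrative stand-ins, not the real amdgpu structures):

    #include <stdio.h>

    struct device { const char *name; };  /* stand-in for struct amdgpu_device */

    /* the update parameters carry the device, so every helper
     * below needs one argument less */
    struct pte_update_params {
            struct device *dev;
    };

    static void update_pages(struct pte_update_params *params)
    {
            /* reach the device through the back-pointer instead of
             * taking it as a separate parameter */
            printf("updating PTEs via %s\n", params->dev->name);
    }

    int main(void)
    {
            struct device adev = { .name = "amdgpu" };
            struct pte_update_params params = { .dev = &adev };

            update_pages(&params);
            return 0;
    }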
*/ @@ -637,6 +636,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ @@ -657,9 +658,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, ¶ms, - last_pde, last_pt, - count, incr, + amdgpu_vm_update_pages(¶ms, last_pde, + last_pt, count, incr, AMDGPU_PTE_VALID); } @@ -672,8 +672,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, ¶ms, - last_pde, last_pt, + amdgpu_vm_update_pages(¶ms, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); if (params.ib->length_dw != 0) { @@ -705,15 +704,13 @@ error_free: /** * amdgpu_vm_frag_ptes - add fragment information to PTEs * - * @adev: amdgpu_device pointer * @params: see amdgpu_pte_update_params definition * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags */ -static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_pte_update_params *params, +static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, uint64_t pe_start, uint64_t pe_end, uint64_t addr, uint32_t flags) { @@ -754,38 +751,36 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, params, pe_start, - addr, count, AMDGPU_GPU_PAGE_SIZE, - flags); + amdgpu_vm_update_pages(params, pe_start, addr, count, + AMDGPU_GPU_PAGE_SIZE, flags); return; } /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, params, pe_start, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); + amdgpu_vm_update_pages(params, pe_start, addr, count, + AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, params, frag_start, addr, count, + amdgpu_vm_update_pages(params, frag_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, params, frag_end, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); + amdgpu_vm_update_pages(params, frag_end, addr, count, + AMDGPU_GPU_PAGE_SIZE, flags); } } /** * amdgpu_vm_update_ptes - make sure that page tables are valid * - * @adev: amdgpu_device pointer * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: start of GPU address range @@ -795,8 +790,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * * Update the page tables in the range @start - @end. 
*/ -static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_pte_update_params *params, +static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) @@ -849,8 +843,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, */ cur_pe_end += 8 * nptes; } else { - amdgpu_vm_frag_ptes(adev, params, - cur_pe_start, cur_pe_end, + amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end, cur_dst, flags); cur_pe_start = next_pe_start; @@ -863,8 +856,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, params, cur_pe_start, - cur_pe_end, cur_dst, flags); + amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end, cur_dst, flags); } /** @@ -902,7 +894,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, int r; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + memset(¶ms, 0, sizeof(params)); + params.adev = adev; params.src = src; params.pages_addr = pages_addr; @@ -959,8 +953,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, ¶ms, vm, start, - last + 1, addr, flags); + amdgpu_vm_update_ptes(¶ms, vm, start, last + 1, addr, flags); amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); From 92696dd52e58b5caaee1dc027cf14f327f91d2e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 5 Aug 2016 13:56:35 +0200 Subject: [PATCH 106/171] drm/amdgpu: flip frag_ptes and update_pts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can add the fragment params before we split the update for the page tables. That should save a few CPU cycles for larger updates. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 220 ++++++++++++------------- 1 file changed, 106 insertions(+), 114 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index fd7901c1320f..d2796bb4004b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -702,17 +702,102 @@ error_free: } /** + * amdgpu_vm_update_ptes - make sure that page tables are valid + * + * @params: see amdgpu_pte_update_params definition + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * @dst: destination address to map to, the next dst inside the function + * @flags: mapping flags + * + * Update the page tables in the range @start - @end. 
+ */ +static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, + struct amdgpu_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) +{ + const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; + + uint64_t cur_pe_start, cur_nptes, cur_dst; + uint64_t addr; /* next GPU address to be updated */ + uint64_t pt_idx; + struct amdgpu_bo *pt; + unsigned nptes; /* next number of ptes to be updated */ + uint64_t next_pe_start; + + /* initialize the variables */ + addr = start; + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + cur_pe_start = amdgpu_bo_gpu_offset(pt); + cur_pe_start += (addr & mask) * 8; + cur_nptes = nptes; + cur_dst = dst; + + /* for next ptb*/ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; + + /* walk over the address space and update the page tables */ + while (addr < end) { + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + next_pe_start = amdgpu_bo_gpu_offset(pt); + next_pe_start += (addr & mask) * 8; + + if ((cur_pe_start + 8 * cur_nptes) == next_pe_start) { + /* The next ptb is consecutive to current ptb. + * Don't call amdgpu_vm_update_pages now. + * Will update two ptbs together in future. + */ + cur_nptes += nptes; + } else { + amdgpu_vm_update_pages(params, cur_pe_start, cur_dst, + cur_nptes, AMDGPU_GPU_PAGE_SIZE, + flags); + + cur_pe_start = next_pe_start; + cur_nptes = nptes; + cur_dst = dst; + } + + /* for next ptb*/ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; + } + + amdgpu_vm_update_pages(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); +} + +/* * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @params: see amdgpu_pte_update_params definition - * @pe_start: first PTE to handle - * @pe_end: last PTE to handle - * @addr: addr those PTEs should point to + * @vm: requested vm + * @start: first PTE to handle + * @end: last PTE to handle + * @dst: addr those PTEs should point to * @flags: hw mapping flags */ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, - uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags) + struct amdgpu_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) { /** * The MC L1 TLB supports variable sized pages, based on a fragment @@ -735,130 +820,37 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, /* SI and newer are optimized for 64KB */ uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); - uint64_t frag_align = 0x80; + uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; - uint64_t frag_start = ALIGN(pe_start, frag_align); - uint64_t frag_end = pe_end & ~(frag_align - 1); - - unsigned count; - - /* Abort early if there isn't anything to do */ - if (pe_start == pe_end) - return; + uint64_t frag_start = ALIGN(start, frag_align); + uint64_t frag_end = end & ~(frag_align - 1); /* system pages are non continuously */ - if (params->src || params->pages_addr || - !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { + if (params->src || params->pages_addr || !(flags & AMDGPU_PTE_VALID) || + (frag_start >= frag_end)) { - count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(params, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags); + 
amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); return; } /* handle the 4K area at the beginning */ - if (pe_start != frag_start) { - count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(params, pe_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags); - addr += AMDGPU_GPU_PAGE_SIZE * count; + if (start != frag_start) { + amdgpu_vm_update_ptes(params, vm, start, frag_start, + dst, flags); + dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; } /* handle the area in the middle */ - count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(params, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); + amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, + flags | frag_flags); /* handle the 4K area at the end */ - if (frag_end != pe_end) { - addr += AMDGPU_GPU_PAGE_SIZE * count; - count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(params, frag_end, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags); + if (frag_end != end) { + dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; + amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); } } -/** - * amdgpu_vm_update_ptes - make sure that page tables are valid - * - * @params: see amdgpu_pte_update_params definition - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * @dst: destination address to map to, the next dst inside the function - * @flags: mapping flags - * - * Update the page tables in the range @start - @end. - */ -static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, - struct amdgpu_vm *vm, - uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags) -{ - const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - - uint64_t cur_pe_start, cur_pe_end, cur_dst; - uint64_t addr; /* next GPU address to be updated */ - uint64_t pt_idx; - struct amdgpu_bo *pt; - unsigned nptes; /* next number of ptes to be updated */ - uint64_t next_pe_start; - - /* initialize the variables */ - addr = start; - pt_idx = addr >> amdgpu_vm_block_size; - pt = vm->page_tables[pt_idx].entry.robj; - - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; - else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - - cur_pe_start = amdgpu_bo_gpu_offset(pt); - cur_pe_start += (addr & mask) * 8; - cur_pe_end = cur_pe_start + 8 * nptes; - cur_dst = dst; - - /* for next ptb*/ - addr += nptes; - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - - /* walk over the address space and update the page tables */ - while (addr < end) { - pt_idx = addr >> amdgpu_vm_block_size; - pt = vm->page_tables[pt_idx].entry.robj; - - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; - else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - - next_pe_start = amdgpu_bo_gpu_offset(pt); - next_pe_start += (addr & mask) * 8; - - if (cur_pe_end == next_pe_start) { - /* The next ptb is consecutive to current ptb. - * Don't call amdgpu_vm_frag_ptes now. - * Will update two ptbs together in future. 
- */ - cur_pe_end += 8 * nptes; - } else { - amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end, - cur_dst, flags); - - cur_pe_start = next_pe_start; - cur_pe_end = next_pe_start + 8 * nptes; - cur_dst = dst; - } - - /* for next ptb*/ - addr += nptes; - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - } - - amdgpu_vm_frag_ptes(params, cur_pe_start, cur_pe_end, cur_dst, flags); -} - /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * @@ -953,7 +945,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(¶ms, vm, start, last + 1, addr, flags); + amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); From e2b84e4be37462e77f34b5bd057d2483fcd3ca98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 8 Aug 2016 14:40:18 +0200 Subject: [PATCH 107/171] drm/amdgpu: use more than 64KB fragment size if possible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We align to 64KB, but when userspace aligns even more we can easily use more. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2796bb4004b..7f385e78a265 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -818,13 +818,13 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * allocation size to the fragment size. */ - /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); - uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; uint64_t frag_start = ALIGN(start, frag_align); uint64_t frag_end = end & ~(frag_align - 1); + uint32_t frag; + /* system pages are non continuously */ if (params->src || params->pages_addr || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { @@ -833,6 +833,10 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, return; } + /* use more than 64KB fragment size if possible */ + frag = lower_32_bits(frag_start | frag_end); + frag = likely(frag) ? __ffs(frag) : 31; + /* handle the 4K area at the beginning */ if (start != frag_start) { amdgpu_vm_update_ptes(params, vm, start, frag_start, @@ -842,7 +846,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, /* handle the area in the middle */ amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, - flags | frag_flags); + flags | AMDGPU_PTE_FRAG(frag)); /* handle the 4K area at the end */ if (frag_end != end) { From e7b26d12cd44cff6ad25db7896ed4c2e1f1848ae Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 9 Aug 2016 13:30:27 +0200 Subject: [PATCH 108/171] gpu: drm: amd: amdgpu: amdgpu_i2c: don't print error when adding adapter fails The core will do this for us now. 
Signed-off-by: Wolfram Sang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 31a676376d73..c93a92a840ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, "AMDGPU i2c hw bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the amdgpu bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), From 88afb9e6cd3c3266abe9632dd99c60ca0581d8dc Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 9 Aug 2016 13:30:30 +0200 Subject: [PATCH 109/171] gpu: drm: radeon: radeon_i2c: don't print error when adding adapter fails The core will do this for us now. Signed-off-by: Wolfram Sang Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_i2c.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 9590bcd321c0..021aa005623f 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -938,10 +938,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else if (rec->hw_capable && radeon_hw_i2c && ASIC_IS_DCE3(rdev)) { @@ -950,10 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_atom_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the radeon bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), From c229bfbbd04ac112bd15331d3a06d12e4e86a45c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 8 Aug 2016 16:23:03 +0900 Subject: [PATCH 110/171] drm: Add page_flip_target CRTC hook v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mostly the same as the existing page_flip hook, but takes an additional parameter specifying the target vertical blank period when the flip should take effect. 
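A driver honoring the target is expected to defer the programmed flip until the target vblank has been reached, and the comparison has to survive the 32-bit vblank counter wrapping around. A minimal standalone sketch of that check (illustrative only; the signed-difference idiom matches the driver-side implementations later in this series):

    #include <assert.h>
    #include <stdint.h>

    /* casting the difference to a signed type keeps the check
     * correct across counter wraparound */
    static int target_reached(uint32_t target, uint32_t cur)
    {
            return (int32_t)(target - cur) <= 0;
    }

    int main(void)
    {
            assert(!target_reached(101, 100));        /* one vblank still to go */
            assert(target_reached(100, 100));         /* exactly on target */
            assert(!target_reached(1, 0xffffffffu));  /* still pending across the wrap */
            assert(target_reached(0xffffffffu, 1));   /* already passed */
            return 0;
    }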
v2: * Add curly braces around else statement corresponding to an if block with curly braces (Alex Deucher) * Call drm_crtc_vblank_put in the error case (Daniel Vetter) * Clarify entry point documentation comment (Daniel Vetter) Acked-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Daniel Vetter Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_crtc.c | 26 ++++++++++++++++++++++---- include/drm/drm_crtc.h | 18 ++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b1dbb60af99f..a316f7336aeb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -5402,6 +5402,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, struct drm_crtc *crtc; struct drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; + u32 target_vblank = 0; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || @@ -5415,6 +5416,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, if (!crtc) return -ENOENT; + if (crtc->funcs->page_flip_target) { + int r; + + r = drm_crtc_vblank_get(crtc); + if (r) + return r; + + target_vblank = drm_crtc_vblank_count(crtc) + + !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); + } else if (crtc->funcs->page_flip == NULL) { + return -EINVAL; + } + drm_modeset_lock_crtc(crtc, crtc->primary); if (crtc->primary->fb == NULL) { /* The framebuffer is currently unbound, presumably @@ -5425,9 +5439,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; } - if (crtc->funcs->page_flip == NULL) - goto out; - fb = drm_framebuffer_lookup(dev, page_flip->fb_id); if (!fb) { ret = -ENOENT; @@ -5468,7 +5479,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } crtc->primary->old_fb = crtc->primary->fb; - ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); + if (crtc->funcs->page_flip_target) + ret = crtc->funcs->page_flip_target(crtc, fb, e, + page_flip->flags, + target_vblank); + else + ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) drm_event_cancel_free(dev, &e->base); @@ -5481,6 +5497,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } out: + if (ret) + drm_crtc_vblank_put(crtc); if (fb) drm_framebuffer_unreference(fb); if (crtc->primary->old_fb) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 44e070800b6d..3c90563e1a54 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -580,6 +580,24 @@ struct drm_crtc_funcs { struct drm_pending_vblank_event *event, uint32_t flags); + /** + * @page_flip_target: + * + * Same as @page_flip but with an additional parameter specifying the + * absolute target vertical blank period (as reported by + * drm_crtc_vblank_count()) when the flip should take effect. + * + * Note that the core code calls drm_crtc_vblank_get before this entry + * point, and will call drm_crtc_vblank_put if this entry point returns + * any non-0 error code. It's the driver's responsibility to call + * drm_crtc_vblank_put after this entry point returns 0, typically when + * the flip completes. 
+ */ + int (*page_flip_target)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, uint32_t target); + /** * @set_property: * From 325cbba19a32b172300fc87cd4da7c45c4f9e0ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 4 Aug 2016 12:39:37 +0900 Subject: [PATCH 111/171] drm/amdgpu: Provide page_flip_target hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we can program a flip during a vertical blank period, if it's the one targeted by the flip (or a later one). This allows simplifying amdgpu_flip_work_func considerably. agd: update dce_virtual.c as well. Acked-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 96 +++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 8 +- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2 +- 7 files changed, 39 insertions(+), 76 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3d128c16334d..c54f5b962b80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -730,10 +730,11 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, */ struct amdgpu_flip_work { - struct work_struct flip_work; + struct delayed_work flip_work; struct work_struct unpin_work; struct amdgpu_device *adev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_rbo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index abeb6aaf89a9..9af8d3c7ae8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) container_of(cb, struct amdgpu_flip_work, cb); fence_put(f); - schedule_work(&work->flip_work); + schedule_work(&work->flip_work.work); } static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, @@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, static void amdgpu_flip_work_func(struct work_struct *__work) { + struct delayed_work *delayed_work = + container_of(__work, struct delayed_work, work); struct amdgpu_flip_work *work = - container_of(__work, struct amdgpu_flip_work, flip_work); + container_of(delayed_work, struct amdgpu_flip_work, flip_work); struct amdgpu_device *adev = work->adev; struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; - unsigned i, repcnt = 4; - int vpos, hpos, stat, min_udelay = 0; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + unsigned i; + int vpos, hpos; if (amdgpu_flip_handle_fence(work, &work->excl)) return; @@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work) if (amdgpu_flip_handle_fence(work, &work->shared[i])) return; - /* We borrow the event spin lock for protecting flip_status */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval then it needs - * to delay programming the mmio flip until the real vblank 
is entered. - * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for - * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip */ - while (amdgpuCrtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. - */ - stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (amdgpuCrtc->enabled && + (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) { + schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); + return; } - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); /* Do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); @@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) kfree(work); } -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; @@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, if (work == NULL) return -ENOMEM; - INIT_WORK(&work->flip_work, amdgpu_flip_work_func); + INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func); INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); work->event = event; @@ -237,12 +206,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, amdgpu_bo_unreserve(new_rbo); work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - 
drm_crtc_vblank_count(crtc) + + amdgpu_get_vblank_counter_kms(dev, work->crtc_id); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; @@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - amdgpu_flip_work_func(&work->flip_work); + amdgpu_flip_work_func(&work->flip_work.work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b1ae33bdbd84..7b0eff7d060b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -591,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile void amdgpu_print_display_setup(struct drm_device *dev); int amdgpu_modeset_create_props(struct amdgpu_device *adev); int amdgpu_crtc_set_config(struct drm_mode_set *set); -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags); +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target); extern const struct drm_mode_config_funcs amdgpu_mode_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index b888d7223e8e..a0964c99e3a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2737,7 +2737,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .gamma_set = dce_v10_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index cfadd79d2580..7d8417ae0ce6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2755,7 +2755,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .gamma_set = dce_v11_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index e4467b8ff40c..7badb0022a5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2598,7 +2598,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .gamma_set = dce_v8_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) diff --git 
a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 5499693f1dcf..6f533a0dbdb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -188,7 +188,7 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { .gamma_set = dce_virtual_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_virtual_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) From 3fd4b751c5409f4b9bf67d12b26356406b2af94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 4 Aug 2016 12:39:38 +0900 Subject: [PATCH 112/171] drm/amdgpu: Set MASTER_UPDATE_MODE to 0 again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the previous change, it's safe to let page flips take effect anytime during a vertical blank period. This can avoid delaying a flip by a frame in some cases where we get to amdgpu_flip_work_func -> adev->mode_info.funcs->page_flip during a vertical blank period. Acked-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index a0964c99e3a9..db6e6c3eaae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -646,8 +646,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, if (save->crtc_enabled[i]) { tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); @@ -2314,8 +2314,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7d8417ae0ce6..a35ac3a859ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2297,8 +2297,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 7badb0022a5e..9fcf74032885 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2183,8 +2183,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); From b8fc75cfee43232e12673ddebb3539ea805105ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 4 Aug 2016 12:39:39 +0900 Subject: [PATCH 113/171] drm/radeon: Provide page_flip_target hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we can program a flip during a vertical blank period, if it's the one targeted by the flip (or a later one). This allows simplifying radeon_flip_work_func considerably. Acked-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_display.c | 89 +++++++------------------ 2 files changed, 25 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5633ee3eb46e..1b0dcad916b0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -742,6 +742,7 @@ struct radeon_flip_work { struct work_struct unpin_work; struct radeon_device *rdev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c3206fb8f4cf..890171f08987 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -400,14 +400,13 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_flip_work *work = container_of(__work, struct radeon_flip_work, flip_work); struct radeon_device *rdev = work->rdev; + struct drm_device *dev = rdev->ddev; struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; unsigned long flags; int r; - int vpos, hpos, stat, min_udelay = 0; - unsigned repcnt = 4; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + int vpos, hpos; down_read(&rdev->exclusive_lock); if (work->fence) { @@ -438,59 +437,25 @@ static void radeon_flip_work_func(struct work_struct *__work) work->fence = NULL; } + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip + */ + while (radeon_crtc->enabled && + (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0) + usleep_range(1000, 2000); + /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); /* set the proper interrupt */ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval 
then it needs - * to delay programming the mmio flip until the real vblank is entered. - * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in radeon_get_crtc_scanoutpos() for - * timestamping and radeon_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. - */ - while (radeon_crtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. - */ - stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; - - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); - /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async); @@ -499,10 +464,11 @@ static void radeon_flip_work_func(struct work_struct *__work) up_read(&rdev->exclusive_lock); } -static int radeon_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -599,12 +565,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, base &= ~7; } work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - drm_crtc_vblank_count(crtc) + + dev->driver->get_vblank_counter(dev, work->crtc_id); /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -613,7 +575,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } radeon_crtc->flip_status = RADEON_FLIP_PENDING; radeon_crtc->flip_work = work; @@ -626,9 +588,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, queue_work(radeon_crtc->flip_queue, &work->flip_work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); @@ 
-697,7 +656,7 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = { .gamma_set = radeon_crtc_gamma_set, .set_config = radeon_crtc_set_config, .destroy = radeon_crtc_destroy, - .page_flip = radeon_crtc_page_flip, + .page_flip_target = radeon_crtc_page_flip_target, }; static void radeon_crtc_init(struct drm_device *dev, int index) From 5dd20bbae8a39100e1cf4a7fcc8ecd598c09d992 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 4 Aug 2016 12:39:40 +0900 Subject: [PATCH 114/171] drm/radeon: Set MASTER_UPDATE_MODE to 0 again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the previous change, it's safe to let page flips take effect anytime during a vertical blank period. This can avoid delaying a flip by a frame in some cases where we get to radeon_flip_work_func -> adev->mode_info.funcs->page_flip during a vertical blank period. Acked-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_crtc.c | 8 ++++---- drivers/gpu/drm/radeon/evergreen.c | 3 +-- drivers/gpu/drm/radeon/rv515.c | 3 +-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a97abc8af657..2029e35d50a4 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1433,8 +1433,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); @@ -1632,8 +1632,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index db275b7ed34a..f95db0c1a17a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2878,9 +2878,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c55d653aaf5f..76c55c5d11ec 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,9 +406,8 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp 
& 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); From f837297ad82480024d3ad08cd84f6670bcafa862 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 8 Aug 2016 16:23:39 +0900 Subject: [PATCH 115/171] drm: Add DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These flags allow userspace to explicitly specify the target vertical blank period when a flip should take effect. v2: * Add new struct drm_mode_crtc_page_flip_target instead of modifying struct drm_mode_crtc_page_flip, to make sure all existing userspace code keeps compiling (Daniel Vetter) Acked-by: Christian König Reviewed-by: Daniel Vetter Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_crtc.c | 48 +++++++++++++++++++++++++++++++------ drivers/gpu/drm/drm_ioctl.c | 8 +++++++ include/uapi/drm/drm.h | 1 + include/uapi/drm/drm_mode.h | 39 +++++++++++++++++++++++++++--- 4 files changed, 86 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a316f7336aeb..93b9821f7f24 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -5398,15 +5398,23 @@ out: int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_mode_crtc_page_flip *page_flip = data; + struct drm_mode_crtc_page_flip_target *page_flip = data; struct drm_crtc *crtc; struct drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; - u32 target_vblank = 0; + u32 target_vblank = page_flip->sequence; int ret = -EINVAL; - if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || - page_flip->reserved != 0) + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS) + return -EINVAL; + + if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) + return -EINVAL; + + /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags + * can be specified + */ + if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET) return -EINVAL; if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) @@ -5417,15 +5425,41 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, return -ENOENT; if (crtc->funcs->page_flip_target) { + u32 current_vblank; int r; r = drm_crtc_vblank_get(crtc); if (r) return r; - target_vblank = drm_crtc_vblank_count(crtc) + - !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); - } else if (crtc->funcs->page_flip == NULL) { + current_vblank = drm_crtc_vblank_count(crtc); + + switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) { + case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE: + if ((int)(target_vblank - current_vblank) > 1) { + DRM_DEBUG("Invalid absolute flip target %u, " + "must be <= %u\n", target_vblank, + current_vblank + 1); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + break; + case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE: + if (target_vblank != 0 && target_vblank != 1) { + DRM_DEBUG("Invalid relative flip target %u, " + "must be 0 or 1\n", target_vblank); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + target_vblank += current_vblank; + break; + default: + target_vblank = current_vblank + + !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); + break; + } + } else if (crtc->funcs->page_flip == NULL || + (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) { return -EINVAL; } diff --git 
a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 33af4a5ddca1..0099d2a4d618 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -228,6 +228,7 @@ static int drm_getstats(struct drm_device *dev, void *data, static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_get_cap *req = data; + struct drm_crtc *crtc; req->value = 0; switch (req->capability) { @@ -254,6 +255,13 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_PAGE_FLIP_TARGET: + req->value = 1; + drm_for_each_crtc(crtc, dev) { + if (!crtc->funcs->page_flip_target) + req->value = 0; + } + break; case DRM_CAP_CURSOR_WIDTH: if (dev->mode_config.cursor_width) req->value = dev->mode_config.cursor_width; diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 452675fb55d9..b2c52843bc70 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -646,6 +646,7 @@ struct drm_gem_open { #define DRM_CAP_CURSOR_WIDTH 0x8 #define DRM_CAP_CURSOR_HEIGHT 0x9 #define DRM_CAP_ADDFB2_MODIFIERS 0x10 +#define DRM_CAP_PAGE_FLIP_TARGET 0x11 /** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 49a72659b801..df0e3504c349 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -520,7 +520,13 @@ struct drm_color_lut { #define DRM_MODE_PAGE_FLIP_EVENT 0x01 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 -#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) +#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 +#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8 +#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \ + DRM_MODE_PAGE_FLIP_TARGET_RELATIVE) +#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \ + DRM_MODE_PAGE_FLIP_ASYNC | \ + DRM_MODE_PAGE_FLIP_TARGET) /* * Request a page flip on the specified crtc. @@ -543,8 +549,7 @@ struct drm_color_lut { * 'as soon as possible', meaning that it not delay waiting for vblank. * This may cause tearing on the screen. * - * The reserved field must be zero until we figure out something - * clever to use it for. + * The reserved field must be zero. */ struct drm_mode_crtc_page_flip { @@ -555,6 +560,34 @@ struct drm_mode_crtc_page_flip { __u64 user_data; }; +/* + * Request a page flip on the specified crtc. + * + * Same as struct drm_mode_crtc_page_flip, but supports new flags and + * re-purposes the reserved field: + * + * The sequence field must be zero unless either of the + * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When + * the ABSOLUTE flag is specified, the sequence field denotes the absolute + * vblank sequence when the flip should take effect. When the RELATIVE + * flag is specified, the sequence field denotes the relative (to the + * current one when the ioctl is called) vblank sequence when the flip + * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to + * make sure the vblank sequence before the target one has passed before + * calling this ioctl. The purpose of the + * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify + * the target for when code dealing with a page flip runs during a + * vertical blank period. 
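+ *
+ * Illustrative example, matching the validation added to
+ * drm_mode_page_flip_ioctl() earlier in this patch: to aim a flip at
+ * the vblank immediately following the current one, pass either the
+ * RELATIVE flag with sequence == 1 or the ABSOLUTE flag with sequence
+ * equal to the current vblank count + 1. Targets further in the
+ * future are rejected with -EINVAL, as is setting both flags at once.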
+ */ + +struct drm_mode_crtc_page_flip_target { + __u32 crtc_id; + __u32 fb_id; + __u32 flags; + __u32 sequence; + __u64 user_data; +}; + /* create a dumb scanout buffer */ struct drm_mode_create_dumb { __u32 height; From 75cd45a497812d531cfad8b816485f84e9c082e9 Mon Sep 17 00:00:00 2001 From: Alexandre Demers Date: Wed, 10 Aug 2016 03:02:04 -0400 Subject: [PATCH 116/171] Fixing copy-paste errors and removing unneeded newlines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Alexandre Demers Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 9fcf74032885..efca1b972813 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1659,7 +1659,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint32_t offset = dig->afmt->offset; - WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT)); WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz); WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); @@ -2045,7 +2045,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | - (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif @@ -2699,7 +2699,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ @@ -3281,7 +3281,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, drm_handle_vblank(adev->ddev, crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - break; case 1: /* vline */ if (disp_int & interrupt_status_offsets[crtc].vline) @@ -3290,7 +3289,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); DRM_DEBUG("IH: D%d vline\n", crtc + 1); - break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); From f3f0ea9536e231fdc523fff25d11628bba0e5d20 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 11 Aug 2016 09:44:53 -0400 Subject: [PATCH 117/171] drm/amd/amdgpu: Cleanup register access in VCE v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 145 ++++++++------------------ 1 file changed, 43 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 7e6bb45658f6..073cf9ed0674 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -110,22 +110,13 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) 
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) { - u32 tmp, data; - - tmp = data = RREG32(mmVCE_RB_ARB_CTRL); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(mmVCE_RB_ARB_CTRL, data); + WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0); } static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, bool gated) { - u32 tmp, data; + u32 data; /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); @@ -136,65 +127,55 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, fly as necessary. */ if (gated) { - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data = RREG32(mmVCE_CLOCK_GATING_B); data |= 0x1ff; data &= ~0xef0000; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); + WREG32(mmVCE_CLOCK_GATING_B, data); - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0x3ff000; data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); + WREG32(mmVCE_UENC_CLOCK_GATING, data); - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x2; data &= ~0x00010000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data |= 0x37f; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | 0x8; - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data = RREG32(mmVCE_CLOCK_GATING_B); data &= ~0x80010; data |= 0xe70008; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); + WREG32(mmVCE_CLOCK_GATING_B, data); - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); + WREG32(mmVCE_UENC_CLOCK_GATING, data); - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x10000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | 0x8); - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } vce_v3_0_override_vce_clock_gating(adev, false); } @@ -213,12 +194,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) } DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + 
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(10); } @@ -256,34 +234,22 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); vce_v3_0_mc_resume(adev, idx); - - WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, - ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(100); r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); @@ -292,7 +258,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -307,33 +273,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, 0, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); + /* hold on ECPU */ - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -561,9 +519,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) } WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); } static bool vce_v3_0_is_idle(void *handle) @@ -599,7 +555,6 @@ static int vce_v3_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; - u32 tmp; /* According to VCE team , we should use VCE_STATUS instead * SRBM_STATUS.VCE_BUSY bit for busy status checking. 
@@ -614,23 +569,17 @@ static int vce_v3_0_check_soft_reset(void *handle) * * VCE team suggest use bit 3--bit 6 for busy status check */ - tmp = RREG32(mmGRBM_GFX_INDEX); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - WREG32(mmGRBM_GFX_INDEX, tmp); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - tmp = RREG32(mmGRBM_GFX_INDEX); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); - WREG32(mmGRBM_GFX_INDEX, tmp); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - tmp = RREG32(mmGRBM_GFX_INDEX); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - WREG32(mmGRBM_GFX_INDEX, tmp); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); if (srbm_soft_reset) { adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; @@ -718,9 +667,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, { DRM_DEBUG("IH: VCE\n"); - WREG32_P(mmVCE_SYS_INT_STATUS, - VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, - ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1); switch (entry->src_data) { case 0: @@ -767,13 +714,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - if (i == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -792,7 +733,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; From 9aeb774c1e458313d9f013ca482be03a993c601e Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 11 Aug 2016 09:55:51 -0400 Subject: [PATCH 118/171] drm/amd/amdgpu: add mutex in check_soft for VCE v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 073cf9ed0674..615b8b16ad04 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -569,6 +569,7 @@ static int vce_v3_0_check_soft_reset(void *handle) * * VCE team suggest use bit 3--bit 6 for busy status check */ + mutex_lock(&adev->grbm_idx_mutex); WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); @@ -588,6 +589,7 @@ static int vce_v3_0_check_soft_reset(void *handle) adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; adev->vce.srbm_soft_reset = 0; } + 
mutex_unlock(&adev->grbm_idx_mutex); return 0; } From 21d3cbbe01c4c466dcba71071f16886526b6741b Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 11 Aug 2016 09:59:12 -0400 Subject: [PATCH 119/171] drm/amd/amdgpu: VCE v2 register cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 39 ++++++++++----------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 80a37a602181..21ba219e943b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -127,15 +127,10 @@ static int vce_v2_0_start(struct amdgpu_device *adev) WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); - - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); for (i = 0; i < 10; ++i) { uint32_t status; @@ -150,10 +145,9 @@ static int vce_v2_0_start(struct amdgpu_device *adev) break; DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(10); r = -1; } @@ -345,13 +339,13 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) DRM_INFO("VCE is busy, Can't set clock gateing"); return; } - WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); WREG32(mmVCE_STATUS, 0); } else { - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); } @@ -458,9 +452,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev) WREG32(mmVCE_VCPU_CACHE_SIZE2, size); WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); vce_v2_0_init_cg(adev); } @@ -474,11 +466,11 @@ static bool vce_v2_0_is_idle(void *handle) static int vce_v2_0_wait_for_idle(void *handle) { - unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + if (vce_v2_0_is_idle(handle)) return 0; } return -ETIMEDOUT; @@ -488,8 +480,7 @@ static int vce_v2_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - 
WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); return vce_v2_0_start(adev); @@ -516,10 +507,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, DRM_DEBUG("IH: VCE\n"); switch (entry->src_data) { case 0: - amdgpu_fence_process(&adev->vce.ring[0]); - break; case 1: - amdgpu_fence_process(&adev->vce.ring[1]); + amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", From f4a7f127d9151683ca806f1c73cfbc72f268d62e Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 11 Aug 2016 10:08:22 -0400 Subject: [PATCH 120/171] drm/amd/amdgpu: UVD v6 register cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index c11b97f8e376..7b7e82840c95 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -397,15 +397,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); /* disable clock gating */ - tmp = RREG32(mmUVD_CGC_CTRL); - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, tmp); + WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0); /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1); mdelay(1); /* put LMI, VCPU, RBC etc... 
into reset */ @@ -421,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* take UVD block out of reset */ - WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0); mdelay(5); /* initialize UVD memory controller */ @@ -456,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -476,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) break; DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmUVD_SOFT_RESET, 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0); mdelay(10); r = -1; } @@ -497,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* clear the bit 4 of UVD_STATUS */ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); - tmp = 0; - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, tmp); /* set the write pointer delay */ @@ -526,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); - WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); return 0; } @@ -743,7 +738,7 @@ static int uvd_v6_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + if (uvd_v6_0_is_idle(handle)) return 0; } return -ETIMEDOUT; From e7893c4bd34b9d2f942d77666656efaa084a3f87 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 26 Jul 2016 14:13:21 +0800 Subject: [PATCH 121/171] drm/amdgpu: add shadow bo support V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit shadow bo is the shadow of a bo, which is always in GTT, which can be used to backup the original bo. V2: reference shadow parent, shadow bo will be freed by who allocted him. 
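For reference, a minimal usage sketch of the new flag, modeled on the page-table call sites later in this series (error paths trimmed; adev and size assumed from the surrounding context):

	struct amdgpu_bo *bo;
	int r;

	/* One call allocates the VRAM BO plus its GTT shadow. */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW,
			     NULL, NULL, &bo);
	if (r)
		return r;

	/* bo->shadow now points at a GTT BO that shares bo's reservation
	 * object and holds a reference on its parent; per the V2 note it
	 * is freed by whoever allocated the original BO.
	 */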
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 48 ++++++++++++++++++++-- include/uapi/drm/amdgpu_drm.h | 2 + 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c54f5b962b80..26f6028bb72b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -505,6 +505,7 @@ struct amdgpu_bo { struct amdgpu_device *adev; struct drm_gem_object gem_base; struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index d8e69a7e51f9..278017d3dc3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -380,6 +380,37 @@ fail_free: return r; } +static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, + unsigned long size, int byte_align, + struct amdgpu_bo *bo) +{ + struct ttm_placement placement = {0}; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; + + if (bo->shadow) + return 0; + + bo->flags |= AMDGPU_GEM_CREATE_SHADOW; + memset(&placements, 0, + (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + + amdgpu_ttm_placement_init(adev, &placement, + placements, AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC); + + r = amdgpu_bo_create_restricted(adev, size, byte_align, true, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC, + NULL, &placement, + bo->tbo.resv, + &bo->shadow); + if (!r) + bo->shadow->parent = amdgpu_bo_ref(bo); + + return r; +} + int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, @@ -389,6 +420,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; memset(&placements, 0, (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); @@ -396,9 +428,19 @@ int amdgpu_bo_create(struct amdgpu_device *adev, amdgpu_ttm_placement_init(adev, &placement, placements, domain, flags); - return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, + domain, flags, sg, &placement, + resv, bo_ptr); + if (r) + return r; + + if (flags & AMDGPU_GEM_CREATE_SHADOW) { + r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); + if (r) + amdgpu_bo_unref(bo_ptr); + } + + return r; } int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index a902a602490b..5aef0b71079b 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -79,6 +79,8 @@ extern "C" { #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) /* Flag that the memory should be in VRAM and cleared */ #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) +/* Flag that create shadow bo(GTT) while allocating vram bo */ +#define AMDGPU_GEM_CREATE_SHADOW (1 << 4) struct drm_amdgpu_gem_create_in { /** the requested memory size */ From 14fd833efa3f13619623501de5e2221dfdab7f7f Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 4 Aug 2016 13:05:46 +0800 Subject: [PATCH 122/171] drm/amdgpu: validate shadow as well when validating bo MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 76 ++++++++++++++++---------- 1 file changed, 46 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 8eb93dff69d6..396a412d70d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -287,18 +287,56 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } +static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, + struct amdgpu_bo *bo) +{ + u64 initial_bytes_moved; + uint32_t domain; + int r; + + if (bo->pin_count) + return 0; + + /* Avoid moving this one if we have moved too many buffers + * for this IB already. + * + * Note that this allows moving at least one buffer of + * any size, because it doesn't take the current "bo" + * into account. We don't want to disallow buffer moves + * completely. + */ + if (p->bytes_moved <= p->bytes_moved_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + +retry: + amdgpu_ttm_placement_from_domain(bo, domain); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain != bo->allowed_domains) { + domain = bo->allowed_domains; + goto retry; + } + } + + return r; +} + int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { struct amdgpu_bo_list_entry *lobj; - u64 initial_bytes_moved; int r; list_for_each_entry(lobj, validated, tv.head) { struct amdgpu_bo *bo = lobj->robj; bool binding_userptr = false; struct mm_struct *usermm; - uint32_t domain; usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm && usermm != current->mm) @@ -313,35 +351,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, binding_userptr = true; } - if (bo->pin_count) - continue; - - /* Avoid moving this one if we have moved too many buffers - * for this IB already. - * - * Note that this allows moving at least one buffer of - * any size, because it doesn't take the current "bo" - * into account. We don't want to disallow buffer moves - * completely. - */ - if (p->bytes_moved <= p->bytes_moved_threshold) - domain = bo->prefered_domains; - else - domain = bo->allowed_domains; - - retry: - amdgpu_ttm_placement_from_domain(bo, domain); - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - - initial_bytes_moved; - - if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } + r = amdgpu_cs_bo_validate(p, bo); + if (r) return r; + if (bo->shadow) { + r = amdgpu_cs_bo_validate(p, bo); + if (r) + return r; } if (binding_userptr) { From 1baa439fb2f4e586bf387cbbc515468ebb16e587 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 4 Aug 2016 13:59:32 +0800 Subject: [PATCH 123/171] drm/amdgpu: allocate shadow for pd/pt bo V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pd/pt shadow bo will be used to backup page table, when gpu reset happens, we can restore the page table by them. 
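To make the backup/restore idea concrete: because parent and shadow share one reservation object and the shadow lives in GTT, restoring after a reset reduces to a GPU copy from the shadow back into VRAM. A rough sketch only, with the helper name and plumbing purely hypothetical (amdgpu_copy_buffer is assumed to be the existing buffer-copy helper in this tree, and both BOs are assumed resident):

	/* Hypothetical sketch: restore a page-table BO from its GTT shadow. */
	static int restore_bo_from_shadow(struct amdgpu_ring *ring,
					  struct amdgpu_bo *bo,
					  struct fence **fence)
	{
		if (!bo->shadow)
			return -EINVAL;

		/* One reservation object covers both sides of the copy. */
		return amdgpu_copy_buffer(ring,
					  amdgpu_bo_gpu_offset(bo->shadow),
					  amdgpu_bo_gpu_offset(bo),
					  amdgpu_bo_size(bo),
					  bo->tbo.resv, fence);
	}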
V2: Free shadow bo. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7f385e78a265..1a474fa1f441 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1328,7 +1328,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, resv, &pt); if (r) goto error_free; @@ -1527,7 +1528,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, NULL, &vm->page_directory); if (r) goto error_free_sched_entity; @@ -1583,10 +1585,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) kfree(mapping); } - for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) + for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { + if (vm->page_tables[i].entry.robj && + vm->page_tables[i].entry.robj->shadow) + amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow); amdgpu_bo_unref(&vm->page_tables[i].entry.robj); + } drm_free_large(vm->page_tables); + if (vm->page_directory->shadow) + amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); } From 478feaf6cc420e66c071c0a743b334abfe8f18c9 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 4 Aug 2016 15:47:50 +0800 Subject: [PATCH 124/171] drm/amdgpu: add shadow flag V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Indicate if need to sync between bo and shadow, where sync to where. V2: Rename to backup_shadow Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 26f6028bb72b..d18ae01bd339 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -480,6 +480,12 @@ struct amdgpu_bo_va { #define AMDGPU_GEM_DOMAIN_MAX 0x3 +enum amdgpu_bo_shadow { + AMDGPU_BO_SHADOW_TO_NONE = 0, + AMDGPU_BO_SHADOW_TO_PARENT, + AMDGPU_BO_SHADOW_TO_SHADOW, +}; + struct amdgpu_bo { /* Protected by gem.mutex */ struct list_head list; @@ -506,6 +512,8 @@ struct amdgpu_bo { struct drm_gem_object gem_base; struct amdgpu_bo *parent; struct amdgpu_bo *shadow; + /* indicate if need to sync between bo and shadow */ + enum amdgpu_bo_shadow backup_shadow; struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; From b0456f93063ec8629cfeee6d03758f92793d96cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 11 Aug 2016 14:06:54 +0200 Subject: [PATCH 125/171] drm/amdgpu: write PTEs directly into the IB. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Write the PTEs at the end of the IB instead of directly into the SDMA commands. This can save quite some CPU cycles building the entries. This doesn't change the DW estimation because PTEs where embedded into the IB before as well. 
It just moves them to the end of the IB. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1a474fa1f441..e5095b5e10e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -911,15 +911,15 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if (params.src) { + if (src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (params.pages_addr) { - /* header for write data commands */ - ndw += ncmds * 4; + } else if (pages_addr) { + /* copy commands needed */ + ndw += ncmds * 7; - /* body of write data command */ + /* and also PTEs */ ndw += nptes * 2; } else { @@ -936,6 +936,22 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.ib = &job->ibs[0]; + if (!src && pages_addr) { + uint64_t *pte; + unsigned i; + + /* Put the PTEs at the end of the IB. */ + i = ndw - nptes * 2; + pte= (uint64_t *)&(job->ibs->ptr[i]); + params.src = job->ibs->gpu_addr + i * 4; + + for (i = 0; i < nptes; ++i) { + pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * + AMDGPU_GPU_PAGE_SIZE); + pte[i] |= flags; + } + } + r = amdgpu_sync_fence(adev, &job->sync, exclusive); if (r) goto error_free; From b7fc2cbd5e9fd4fbac1c3a9fda6eff2b28b7bc4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 11 Aug 2016 16:44:15 +0200 Subject: [PATCH 126/171] drm/amdgpu: remove pages_addr handling from the VM code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more. 
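The reason: the previous patch pre-resolves system pages into PTEs at the tail of the IB and publishes them through params.src, so the pages_addr branch in amdgpu_vm_update_pages() can no longer be reached. Condensed, the three-way dispatch left behind by the hunk below (write_pte keeps its old signature until the next patch):

	if (params->src)
		/* prebuilt PTEs (user src or IB tail): DMA-copy them */
		amdgpu_vm_copy_pte(params->adev, params->ib, pe,
				   params->src + (addr >> 12) * 8, count);
	else if (count < 3)
		/* a handful of entries: cheaper to write them inline */
		amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe,
				    addr, count, incr, flags);
	else
		/* long contiguous runs: let SDMA generate the PTEs */
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe,
				      addr, count, incr, flags);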
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e5095b5e10e5..51db44abdfa5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -62,8 +62,6 @@ struct amdgpu_pte_update_params { struct amdgpu_device *adev; /* address where to copy page table entries from */ uint64_t src; - /* DMA addresses to use for mapping */ - dma_addr_t *pages_addr; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; }; @@ -492,11 +490,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params, amdgpu_vm_copy_pte(params->adev, params->ib, pe, (params->src + (addr >> 12) * 8), count); - } else if (params->pages_addr) { - amdgpu_vm_write_pte(params->adev, params->ib, - params->pages_addr, - pe, addr, count, incr, flags); - } else if (count < 3) { amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr, count, incr, flags); @@ -826,7 +819,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, uint32_t frag; /* system pages are non continuously */ - if (params->src || params->pages_addr || !(flags & AMDGPU_PTE_VALID) || + if (params->src || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); @@ -894,7 +887,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.src = src; - params.pages_addr = pages_addr; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) From de9ea7bd366009d379043d49b5b2660c5b462483 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 12 Aug 2016 11:33:30 +0200 Subject: [PATCH 127/171] drm/amdgpu: cleanup the write_pte implementations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need the gart mapping handling here any more. 
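The resulting contract is worth stating in one place: the VM layer now merges the access flags into the PTE value up front, and the per-ASIC callback merely emits count entries starting at value, stepping incr bytes per entry. From the hunks that follow:

	/* struct amdgpu_vm_pte_funcs: write pte one entry at a time */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count, uint32_t incr);

	/* the single caller in amdgpu_vm_update_pages() */
	amdgpu_vm_write_pte(params->adev, params->ib, pe,
			    addr | flags, count, incr);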
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 +++++-------- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 40 +++++++++----------------- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 40 +++++++++----------------- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 40 +++++++++----------------- 5 files changed, 53 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d18ae01bd339..387b4979f45a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -250,10 +250,9 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t src, unsigned count); /* write pte one entry at a time with addr mapping */ - void (*write_pte)(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr); /* for linear pte/pde updates without addr mapping */ void (*set_pte_pde)(struct amdgpu_ib *ib, uint64_t pe, @@ -965,7 +964,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -2271,7 +2269,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 51db44abdfa5..e0e40aca0f78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -491,8 +491,8 @@ static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params, pe, (params->src + (addr >> 12) * 8), count); } else if (count < 3) { - amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr, - count, incr, flags); + amdgpu_vm_write_pte(params->adev, params->ib, pe, + addr | flags, count, incr); } else { amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, @@ -569,21 +569,15 @@ error: * Look up the physical address of the page that the pte resolves * to and return the pointer for the page table entry. 
*/ -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) +static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - if (pages_addr) { - /* page table offset */ - result = pages_addr[addr >> PAGE_SHIFT]; + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); - - } else { - /* No mapping required */ - result = addr; - } + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); result &= 0xFFFFFFFFFFFFF000ULL; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ee6466912497..e5e44f42e20e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -719,39 +719,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; + unsigned ndw = count * 2; - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 1351c7e834a2..af0f0d283472 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -774,39 +774,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
*/ -static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; + unsigned ndw = count * 2; - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 3a63a4fcf807..88faaee37258 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1001,39 +1001,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
*/ -static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; + unsigned ndw = count * 2; - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } From dc157c6daa317c75058def8f9753909e5224cbec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 12 Aug 2016 11:40:11 +0200 Subject: [PATCH 128/171] drm/amdgpu: remove AMDGPU_VM_NO_FLUSH define MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used any more. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e0e40aca0f78..12925016370b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -51,9 +51,6 @@ * SI supports 16. */ -/* Special value that no flush is necessary */ -#define AMDGPU_VM_NO_FLUSH (~0ll) - /* Local structure. Encapsulate some VM table update parameters to reduce * the number of function parameters */ From 96105e5375892f63cc56fa707a1db0d74abc764d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 12 Aug 2016 12:59:59 +0200 Subject: [PATCH 129/171] drm/amdgpu: stop splitting PTE commands into smaller ones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It doesn't make much sense to create bigger commands first which we then need to split into smaller one again. Just make sure the commands we create aren't to big in the first place. 
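The new shared limit can be cross-checked against the per-packet clamps this patch deletes: the linear copy packet was clamped to 0x1FFFF8 bytes, i.e. 0x1FFFF8 / 8 = 0x3FFFF PTEs, while the write packet (0xFFFFE dwords / 2 = 0x7FFFF PTEs) and the PTE/PDE generate packet (0x7FFFF entries) are looser. The constant added in amdgpu.h is therefore the tightest of the three:

	/* Maximum number of PTEs the hardware can write with one command */
	#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

and enforcing it once at the two call sites, the PDE update loop and amdgpu_vm_update_ptes(), keeps every callback within range.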
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 69 ++++++++------------------ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 69 ++++++++------------------ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 69 ++++++++------------------ 5 files changed, 70 insertions(+), 146 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 387b4979f45a..1a7e05da470e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -833,6 +833,9 @@ struct amdgpu_ring { /* maximum number of VMIDs */ #define AMDGPU_NUM_VM 16 +/* Maximum number of PTEs the hardware can write with one command */ +#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF + /* number of entries in page table */ #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 12925016370b..673c258e49db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -639,7 +639,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || - ((last_pt + incr * count) != pt)) { + ((last_pt + incr * count) != pt) || + (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { amdgpu_vm_update_pages(¶ms, last_pde, @@ -743,7 +744,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; - if ((cur_pe_start + 8 * cur_nptes) == next_pe_start) { + if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && + ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { /* The next ptb is consecutive to current ptb. * Don't call amdgpu_vm_update_pages now. * Will update two ptbs together in future. diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index e5e44f42e20e..e71cd12104b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -694,24 +694,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; + unsigned bytes = count * 8; - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -755,40 +747,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, * * Update the page tables using sDMA (CIK). 
*/ -static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index af0f0d283472..e82229686783 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -749,24 +749,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; + unsigned bytes = count * 8; - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -810,40 +802,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, * * Update the page tables using sDMA (CIK). 
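 *
 * (Editor's note: "CIK" in this kernel-doc is copy-paste residue from
 * cik_sdma.c -- sdma_v2_4 drives VI-generation hardware.)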
*/ -static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 88faaee37258..bee4978bec73 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -976,24 +976,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; + unsigned bytes = count * 8; - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -1037,40 +1029,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, * * Update the page tables using sDMA (CIK). 
*/ -static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** From afef8b8f99b6489ae1e9fe535d74bf2ce9fa17bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 12 Aug 2016 13:29:18 +0200 Subject: [PATCH 130/171] drm/amdgpu: add function pointer to the pte_update_params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remember what function to call while planning the commands instead of figuring it out later on. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 76 ++++++++++++++++++-------- 1 file changed, 54 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 673c258e49db..7ca2e8fa5906 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -61,6 +61,10 @@ struct amdgpu_pte_update_params { uint64_t src; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; + /* Function which actually does the update */ + void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, + uint64_t addr, unsigned count, uint32_t incr, + uint32_t flags); }; /** @@ -464,7 +468,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, } /** - * amdgpu_vm_update_pages - helper to call the right asic function + * amdgpu_vm_do_set_ptes - helper to call the right asic function * * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry @@ -476,18 +480,14 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * Traces the parameters and calls the right asic functions * to setup the page table using the DMA.
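 *
 * (Editor's note: the count < 3 split below reflects command overhead --
 * one or two entries are cheaper as a literal SDMA write, while longer
 * runs amortize the ten-dword GENERATE_PTE_PDE setup.)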
*/ -static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params, - uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, - uint32_t flags) +static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if (params->src) { - amdgpu_vm_copy_pte(params->adev, params->ib, - pe, (params->src + (addr >> 12) * 8), count); - - } else if (count < 3) { + if (count < 3) { amdgpu_vm_write_pte(params->adev, params->ib, pe, addr | flags, count, incr); @@ -497,6 +497,29 @@ static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params, } } +/** + * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART + * + * @params: see amdgpu_pte_update_params definition + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Traces the parameters and calls the DMA function to copy the PTEs. + */ +static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) +{ + trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); + + amdgpu_vm_copy_pte(params->adev, params->ib, pe, + (params->src + (addr >> 12) * 8), count); +} + /** * amdgpu_vm_clear_bo - initially clear the page dir/table * @@ -537,7 +560,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(¶ms, addr, 0, entries, 0, 0); + amdgpu_vm_do_set_ptes(¶ms, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); @@ -643,9 +666,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - amdgpu_vm_update_pages(¶ms, last_pde, - last_pt, count, incr, - AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, + last_pt, count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -657,8 +680,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(¶ms, last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); if (params.ib->length_dw != 0) { amdgpu_ring_pad_ib(ring, params.ib); @@ -747,14 +770,13 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { /* The next ptb is consecutive to current ptb. - * Don't call amdgpu_vm_update_pages now. + * Don't call the update function now. * Will update two ptbs together in future. 
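 *
 * (An editor's aside follows; sketch only, with a hypothetical helper
 * name.)
 */

/* Patch 130's payoff in miniature: the decision of how to update is made
 * once, while the IB is being sized, and amdgpu_vm_update_ptes() then
 * simply calls params->func() without re-checking the mapping type.
 */
static void pick_update_func(struct amdgpu_pte_update_params *params,
			     bool copy_from_gart)
{
	if (copy_from_gart)
		params->func = amdgpu_vm_do_copy_ptes;
	else
		params->func = amdgpu_vm_do_set_ptes;
}

/* (End of the aside; the original comment resumes.)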
*/ cur_nptes += nptes; } else { - amdgpu_vm_update_pages(params, cur_pe_start, cur_dst, - cur_nptes, AMDGPU_GPU_PAGE_SIZE, - flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); cur_pe_start = next_pe_start; cur_nptes = nptes; @@ -766,8 +788,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_update_pages(params, cur_pe_start, cur_dst, cur_nptes, - AMDGPU_GPU_PAGE_SIZE, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); } /* @@ -875,6 +897,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct fence *f = NULL; int r; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.src = src; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); memset(¶ms, 0, sizeof(params)); @@ -900,6 +926,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* only copy commands needed */ ndw += ncmds * 7; + params.func = amdgpu_vm_do_copy_ptes; + } else if (pages_addr) { /* copy commands needed */ ndw += ncmds * 7; @@ -907,12 +935,16 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* and also PTEs */ ndw += nptes * 2; + params.func = amdgpu_vm_do_copy_ptes; + } else { /* set page commands needed */ ndw += ncmds * 10; /* two extra commands for begin/end of fragment */ ndw += 2 * 10; + + params.func = amdgpu_vm_do_set_ptes; } r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); From 6681c5eba1d2dbc559f99d1de4e67ccd3abd7df8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 12 Aug 2016 16:50:12 +0200 Subject: [PATCH 131/171] drm/amdgpu: fix coding style in amdgpu_object.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just a few 80 chars problems. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 86 ++++++++++++---------- 1 file changed, 49 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 278017d3dc3f..4d084eeb4e09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -44,14 +44,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev); static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, struct ttm_mem_reg *mem) { - u64 ret = 0; - if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { - ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > - adev->mc.visible_vram_size ? - adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : - mem->size; - } - return ret; + if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size) + return 0; + + return ((mem->start << PAGE_SHIFT) + mem->size) > + adev->mc.visible_vram_size ? 
+ adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : + mem->size; } static void amdgpu_update_memory_usage(struct amdgpu_device *adev, @@ -125,8 +124,9 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, adev->mc.visible_vram_size < adev->mc.real_vram_size) { placements[c].fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; + placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_TOPDOWN; } placements[c].fpfn = 0; placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | @@ -138,22 +138,24 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | - TTM_PL_FLAG_UNCACHED; + placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; } else { placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; + placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_TT; } } if (domain & AMDGPU_GEM_DOMAIN_CPU) { if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_UNCACHED; + placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_UNCACHED; } else { placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_SYSTEM; } } @@ -539,7 +541,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* force to pin into visible video ram */ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && - (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) { + (!max_offset || max_offset > + bo->adev->mc.visible_vram_size)) { if (WARN_ON_ONCE(min_offset > bo->adev->mc.visible_vram_size)) return -EINVAL; @@ -558,19 +561,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - bo->pin_count = 1; - if (gpu_addr != NULL) - *gpu_addr = amdgpu_bo_gpu_offset(bo); - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - bo->adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size += amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size += amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p pin failed\n", bo); + goto error; } + + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = amdgpu_bo_gpu_offset(bo); + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + bo->adev->vram_pin_size += amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size += amdgpu_bo_size(bo); + } else { + bo->adev->gart_pin_size += amdgpu_bo_size(bo); + } + +error: return r; } @@ -595,16 +602,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - bo->adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size -= amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { 
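		/* (Editor's note: besides the line-length fixes, the rework
		 * inverts the old likely(r == 0) nesting -- fail early here,
		 * keep the success path straight-line, and share the return
		 * through the error label below.)
		 */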
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); + goto error; } + + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { + bo->adev->vram_pin_size -= amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); + } else { + bo->adev->gart_pin_size -= amdgpu_bo_size(bo); + } + +error: return r; } @@ -775,7 +786,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) for (i = 0; i < abo->placement.num_placement; i++) { /* Force into visible VRAM */ if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn)) + (!abo->placements[i].lpfn || + abo->placements[i].lpfn > lpfn)) abo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &abo->placement, false, false); From 89a8f30900754eb27f8f534f35be49b8202c76cd Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 12 Aug 2016 15:14:31 -0400 Subject: [PATCH 132/171] drm/amd/amdgpu: Add more config data for debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds rev_id as well as cg/pg flags to help debug runtime. Signed-off-by: Tom St Denis Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3751d262de68..e606cb2fcc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2585,7 +2585,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 0; + config[no_regs++] = 1; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = adev->gfx.config.max_cu_per_sh; @@ -2610,6 +2610,11 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->gfx.config.gb_addr_config; config[no_regs++] = adev->gfx.config.num_rbs; + /* rev==1 */ + config[no_regs++] = adev->rev_id; + config[no_regs++] = adev->pg_flags; + config[no_regs++] = adev->cg_flags; + while (size && (*pos < no_regs * 4)) { uint32_t value; From 041aa65805de1a8af87dab2a26e52ebfe01f1963 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 17 Aug 2016 14:59:20 +0800 Subject: [PATCH 133/171] drm/amdgpu: Hardcode virtual DCE vblank / scanout position return values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For virtual display feature, by hardcoding 0 for the vblank counter and -EINVAL for the scanout position return value, we signal to the core DRM code that there are no hardware counters we can use for these. 
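[Editor's note -- a sketch restating the contract the patch relies on, not new driver code] With no real CRTC behind the virtual display, the hooks below report "nothing to read here", which together with max_vblank_count = 0 makes the DRM core fall back to software vblank accounting:

static u32 virt_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	return 0;			/* no hardware counter exists */
}

static int virt_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				    u32 *vbl, u32 *position)
{
	*vbl = 0;
	*position = 0;
	return -EINVAL;			/* no hardware scanout position */
}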
Signed-off-by: Emily Deng Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 6f533a0dbdb4..00663a7b4053 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -55,10 +55,7 @@ static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc) static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) { - if (crtc >= adev->mode_info.num_crtc) - return 0; - else - return adev->ddev->vblank[crtc].count; + return 0; } static void dce_virtual_page_flip(struct amdgpu_device *adev, @@ -70,13 +67,10 @@ static void dce_virtual_page_flip(struct amdgpu_device *adev, static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { - if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) - return -EINVAL; - *vbl = 0; *position = 0; - return 0; + return -EINVAL; } static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, @@ -407,6 +401,8 @@ static int dce_virtual_sw_init(void *handle) if (r) return r; + adev->ddev->max_vblank_count = 0; + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; adev->ddev->mode_config.max_width = 16384; @@ -655,7 +651,6 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); unsigned crtc = 0; - adev->ddev->vblank[0].count++; drm_handle_vblank(adev->ddev, crtc); dce_virtual_pageflip_irq(adev, NULL, NULL); hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); @@ -682,8 +677,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad hrtimer_cancel(&adev->mode_info.vblank_timer); } - if (!state || (state && !adev->mode_info.vsync_timer_enabled)) - adev->ddev->vblank[0].count = 0; adev->mode_info.vsync_timer_enabled = state; DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); } @@ -720,7 +713,6 @@ static int dce_virtual_crtc_irq(struct amdgpu_device *adev, unsigned crtc = 0; unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; - adev->ddev->vblank[crtc].count++; dce_virtual_crtc_vblank_int_ack(adev, crtc); if (amdgpu_irq_enabled(adev, source, irq_type)) { From e9f11dc80a113f13731f0af883770b05b3d50509 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 17 Aug 2016 12:00:51 -0400 Subject: [PATCH 134/171] drm/amd/amdgpu: Add more debugfs config data Adds family and external_rev_id to config data Signed-off-by: Tom St Denis Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e606cb2fcc28..c356bb538b43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2585,7 +2585,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 1; + config[no_regs++] = 2; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = 
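/* (Editor's note: config[0] is a layout version -- bumped to 1 by this
 * patch and to 2 by patch 134 below -- so a userspace reader must check
 * it before trusting any of the later slots.)
 */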
adev->gfx.config.max_cu_per_sh; @@ -2615,6 +2615,10 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->pg_flags; config[no_regs++] = adev->cg_flags; + /* rev==2 */ + config[no_regs++] = adev->family; + config[no_regs++] = adev->external_rev_id; + while (size && (*pos < no_regs * 4)) { uint32_t value; From 2a702ccd9a79f6fc23fdb07a410a5f6533ac8f78 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 17 Aug 2016 15:48:30 +0800 Subject: [PATCH 135/171] drm/amd/powerplay: enable power containment features for tonga. v2: fix build error introduced when fixing code style problems. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 2 +- .../gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 24 +- .../gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 11 +- .../drm/amd/powerplay/hwmgr/tonga_powertune.c | 498 ++++++++++++++++++ .../drm/amd/powerplay/hwmgr/tonga_powertune.h | 22 +- 5 files changed, 548 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index c264cb66b020..abbcbc9f6eca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -4,7 +4,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o \ + cz_clockpowergating.o tonga_powertune.o\ tonga_processpptables.o ppatomctrl.o \ tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index c7dc111221c2..e7c281f6dbc7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -302,6 +302,8 @@ void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); + tonga_initialize_power_tune_defaults(hwmgr); + data->mclk_strobe_mode_threshold = 40000; data->mclk_stutter_mode_threshold = 30000; data->mclk_edc_enable_threshold = 40000; @@ -2479,7 +2481,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t graphic_level->VoltageDownHyst = 0; graphic_level->PowerThrottle = 0; - threshold = engine_clock * data->fast_watemark_threshold / 100; + threshold = engine_clock * data->fast_watermark_threshold / 100; /* *get the DAL clock. do it in funture.
PECI_GetMinClockSettings(hwmgr->peci, &minClocks); @@ -2982,6 +2984,10 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot Level!", return result;); + result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(result == 0, + "Failed to populate BAPM Parameters!", return result); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { result = tonga_populate_clock_stretcher_data_table(hwmgr); @@ -4370,6 +4376,10 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to initialize ARB table index!", result = tmp_result); + tmp_result = tonga_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to populate PM fuses!", result = tmp_result); + tmp_result = tonga_populate_initial_mc_reg_table(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate initialize MC Reg table!", result = tmp_result); @@ -4388,6 +4398,18 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to start DPM!", result = tmp_result); + tmp_result = tonga_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable SMC CAC!", result = tmp_result); + + tmp_result = tonga_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable power containment!", result = tmp_result); + + tmp_result = tonga_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to power control set level!", result = tmp_result); + return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h index 3961884bfa9b..fcad9426d3c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -300,6 +300,7 @@ struct tonga_hwmgr { bool dll_defaule_on; bool performance_request_registered; + /* ----------------- Low Power Features ---------------------*/ phw_tonga_bacos bacos; phw_tonga_ulv_parm ulv; @@ -314,10 +315,14 @@ struct tonga_hwmgr { bool enable_tdc_limit_feature; bool enable_pkg_pwr_tracking_feature; bool disable_uvd_power_tune_feature; - phw_tonga_pt_defaults *power_tune_defaults; + struct tonga_pt_defaults *power_tune_defaults; SMU72_Discrete_PmFuses power_tune_table; - uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ - uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + + bool enable_dte_feature; + /* ----------------- Phase Shedding ---------------------*/ bool vddc_phase_shed_control; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c new file mode 100644 index 000000000000..9496ade3247e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c @@ -0,0 +1,498 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "hwmgr.h" +#include "smumgr.h" +#include "tonga_hwmgr.h" +#include "tonga_powertune.h" +#include "tonga_smumgr.h" +#include "smu72_discrete.h" +#include "pp_debug.h" +#include "tonga_ppsmc.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { +/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + {1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t tmp = 0; + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + tonga_hwmgr->power_tune_defaults = + &tonga_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0]; + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + tonga_hwmgr->dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + tonga_hwmgr->fast_watermark_threshold = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + tmp = 1; + tonga_hwmgr->enable_dte_feature = tmp ? false : true; + tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? 
true : false; + } + } +} + + +int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_pt_defaults *defaults = data->power_tune_defaults; + SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + int i, j, k; + uint16_t *pdef1; + uint16_t *pdef2; + + + /* TDP number of fraction bits are changed from 8 to 7 for Fiji + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + pdef1 = defaults->bapmti_r; + pdef2 = defaults->bapmti_rc; + + for (i = 0; i < SMU72_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU72_DTE_SOURCES; j++) { + for (k = 0; k < SMU72_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_hwmgr *data = (struct tonga_hwmgr 
*)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW6 */ + if (tonga_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (tonga_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl Failed !", + return -EINVAL); + + /* DW9-DW12 */ + if (tonga_populate_temperature_scaler(hwmgr) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW14 */ + if (tonga_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + /* DW15-DW18 */ + if (tonga_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW19 */ + if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + 
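	/* (Editor's note: PP_ASSERT_WITH_CODE(cond, msg, action) logs msg
	 * and runs action when cond is false, so each
	 * "if (helper()) PP_ASSERT_WITH_CODE(false, ...)" in this function
	 * turns a helper failure into a logged -EINVAL return.)
	 */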
PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU72_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (smc_result == 0) ? true : false; + } + return result; +} + +int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + +int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int tonga_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int smc_result; + int result = 0; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->enable_dte_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable DTE in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; + } + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (smc_result == 0) { + struct phm_cac_tdp_table *cac_table = + table_info->cac_dtp_table; + uint32_t default_limit = + 
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (tonga_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int tonga_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + +int tonga_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 
+ hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h index 8e6670b3cb67..c8bdb92d81f4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h @@ -34,21 +34,24 @@ enum _phw_tonga_ptc_config_reg_type { }; typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 + + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 -struct _phw_tonga_pt_config_reg { +struct tonga_pt_config_reg { uint32_t Offset; uint32_t Mask; uint32_t Shift; uint32_t Value; phw_tonga_ptc_config_reg_type Type; }; -typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg; -struct _phw_tonga_pt_defaults { +struct tonga_pt_defaults { uint8_t svi_load_line_en; uint8_t svi_load_line_vddC; uint8_t tdc_vddc_throttle_release_limit_perc; @@ -60,7 +63,18 @@ struct _phw_tonga_pt_defaults { uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; -typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults; + + + +void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr); +int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr); +int tonga_enable_power_containment(struct pp_hwmgr *hwmgr); +int tonga_disable_power_containment(struct pp_hwmgr *hwmgr); +int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int tonga_power_control_set_level(struct pp_hwmgr *hwmgr); #endif From c632d7994360aa06158ccb089ad7e4814ce8f08b Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Tue, 2 Aug 2016 11:32:41 +0800 Subject: [PATCH 136/171] amdgpu: move ttm stuff to amdgpu_ttm.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Flora Cui Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 45 +------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 8 --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 78 +++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 51 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1a7e05da470e..06a7cab3bcc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -51,6 +51,7 @@ #include "amdgpu_ih.h" #include "amdgpu_irq.h" #include "amdgpu_ucode.h" +#include "amdgpu_ttm.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" #include "amdgpu_acp.h" @@ -397,51 +398,9 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); /* - * TTM. 
+ * BO. */ -#define AMDGPU_TTM_LRU_SIZE 20 - -struct amdgpu_mman_lru { - struct list_head *lru[TTM_NUM_MEM_TYPES]; - struct list_head *swap_lru; -}; - -struct amdgpu_mman { - struct ttm_bo_global_ref bo_global_ref; - struct drm_global_reference mem_global_ref; - struct ttm_bo_device bdev; - bool mem_global_referenced; - bool initialized; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *vram; - struct dentry *gtt; -#endif - - /* buffer handling */ - const struct amdgpu_buffer_funcs *buffer_funcs; - struct amdgpu_ring *buffer_funcs_ring; - /* Scheduler entity for buffer moves */ - struct amd_sched_entity entity; - - /* custom LRU management */ - struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; -}; - -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, - struct reservation_object *resv, - struct fence **fence); -int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, - struct reservation_object *resv, - struct fence **fence); - -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); - struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index 503d54098128..e73728d90388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -31,14 +31,6 @@ #define AMDGPU_GWS_SHIFT PAGE_SHIFT #define AMDGPU_OA_SHIFT PAGE_SHIFT -#define AMDGPU_PL_GDS TTM_PL_PRIV0 -#define AMDGPU_PL_GWS TTM_PL_PRIV1 -#define AMDGPU_PL_OA TTM_PL_PRIV2 - -#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 -#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 -#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 - struct amdgpu_ring; struct amdgpu_bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h new file mode 100644 index 000000000000..8927e0e5012a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -0,0 +1,78 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_TTM_H__ +#define __AMDGPU_TTM_H__ + +#include "gpu_scheduler.h" + +#define AMDGPU_PL_GDS TTM_PL_PRIV0 +#define AMDGPU_PL_GWS TTM_PL_PRIV1 +#define AMDGPU_PL_OA TTM_PL_PRIV2 + +#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 +#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 +#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 + +#define AMDGPU_TTM_LRU_SIZE 20 + +struct amdgpu_mman_lru { + struct list_head *lru[TTM_NUM_MEM_TYPES]; + struct list_head *swap_lru; +}; + +struct amdgpu_mman { + struct ttm_bo_global_ref bo_global_ref; + struct drm_global_reference mem_global_ref; + struct ttm_bo_device bdev; + bool mem_global_referenced; + bool initialized; + +#if defined(CONFIG_DEBUG_FS) + struct dentry *vram; + struct dentry *gtt; +#endif + + /* buffer handling */ + const struct amdgpu_buffer_funcs *buffer_funcs; + struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; + + /* custom LRU management */ + struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; +}; + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + struct reservation_object *resv, + struct fence **fence); +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence); + +int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); +#endif From 408778e8e9656105aa4df2fe7dce54244ccdd3d7 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Thu, 18 Aug 2016 12:55:13 +0800 Subject: [PATCH 137/171] drm/amdgpu: check domain sanity in amdgpu_bo_pin_restricted() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Abort if the BO is already pinned to a different domain. Signed-off-by: Flora Cui Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 4d084eeb4e09..0a0f3cb89f92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -520,6 +520,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; if (bo->pin_count) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (domain != amdgpu_mem_type_to_domain(mem_type)) + return -EINVAL; + bo->pin_count++; if (gpu_addr) *gpu_addr = amdgpu_bo_gpu_offset(bo); From 32ab75f076ac14a9b2e2e4d0910ecfb102ba9747 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Thu, 18 Aug 2016 13:17:07 +0800 Subject: [PATCH 138/171] drm/amdgpu: update gart_pin_size only if the bo is pinned to GTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Flora Cui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0a0f3cb89f92..dbab3c0d096d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -578,7 +578,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, bo->adev->vram_pin_size += amdgpu_bo_size(bo); if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) bo->adev->invisible_pin_size += amdgpu_bo_size(bo); - } else { + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
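		/* (Editor's note: the bare "else" used to charge any
		 * non-VRAM pin -- including the CPU, GDS, GWS and OA
		 * domains -- to gart_pin_size; the explicit GTT test keeps
		 * the accounting honest.)
		 */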
bo->adev->gart_pin_size += amdgpu_bo_size(bo); } From 27798e07ed9a8e9ae8d71c7a2b0ec3f313076cb8 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Thu, 18 Aug 2016 13:18:09 +0800 Subject: [PATCH 139/171] drm/amdgpu: use domain's gpu_offset for start addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Flora Cui Reviewed-by: Ken Wang Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index dbab3c0d096d..2c08e1b38656 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -530,11 +530,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, *gpu_addr = amdgpu_bo_gpu_offset(bo); if (max_offset != 0) { - u64 domain_start; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - domain_start = bo->adev->mc.vram_start; - else - domain_start = bo->adev->mc.gtt_start; + u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; WARN_ON_ONCE(max_offset < (amdgpu_bo_gpu_offset(bo) - domain_start)); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3cf54c1b21d4..783a9703c160 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -257,10 +257,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, switch (old_mem->mem_type) { case TTM_PL_VRAM: - old_start += adev->mc.vram_start; - break; case TTM_PL_TT: - old_start += adev->mc.gtt_start; + old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -268,10 +266,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, switch (new_mem->mem_type) { case TTM_PL_VRAM: - new_start += adev->mc.vram_start; - break; case TTM_PL_TT: - new_start += adev->mc.gtt_start; + new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); From abc8c1ce48b6ab32224cd6902645f9acb8e7e686 Mon Sep 17 00:00:00 2001 From: jimqu Date: Fri, 19 Aug 2016 09:31:50 +0800 Subject: [PATCH 140/171] drm/amd/amdgpu: S3 resume failed after 4-5 suspend/resume cycles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phenomenon: software hang when the device resumes; the UVD fence reads back as 0xffffffff and the PCIe PID reads as 0xffff. The issue is caused by a VCE reset while updating the CG setting. According to the HW programming guide, adjust the VCE CG update sequence. The patch applies to VCE 2.0.
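For reference, the helpers this patch adds poll hardware state with a bounded retry instead of the old open-coded loop; a minimal sketch of the shared pattern (registers and masks as in the diff below, error handling elided):

    /* wait up to 10 * 100 * 10ms for the VCPU to report firmware loaded */
    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                return 0;
            mdelay(10);
        }
        /* still not up: pulse the ECPU soft reset and retry */
    }
    return -ETIMEDOUT;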
Signed-off-by: JimQu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 137 ++++++++++++++++---------- 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 21ba219e943b..5fa55b52c00e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -40,6 +40,7 @@ #define VCE_V2_0_FW_SIZE (256 * 1024) #define VCE_V2_0_STACK_SIZE (64 * 1024) #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); @@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmVCE_RB_WPTR2, ring->wptr); } +static int vce_v2_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_LMI_STATUS); + + if (status & 0x337f) + return 0; + mdelay(10); + } + } + + return -ETIMEDOUT; +} + +static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v2_0_start - start VCE block * @@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) static int vce_v2_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j, r; + int r; vce_v2_0_mc_resume(adev); @@ -132,25 +176,7 @@ static int vce_v2_0_start(struct amdgpu_device *adev) mdelay(100); WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); - mdelay(10); - WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); - mdelay(10); - r = -1; - } + r = vce_v2_0_firmware_loaded(adev); /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~1); @@ -332,47 +358,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) { - u32 orig, tmp; - - if (gated) { - if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); - return; - } - WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); - WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); - mdelay(100); - WREG32(mmVCE_STATUS, 0); - } else { - WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); - WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); - mdelay(100); + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gateing"); + return; } - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp &= ~0x00060006; - if (gated) { - tmp |= 0xe10000; - } else { - tmp |= 0xe1; - tmp &= ~0xe10000; + WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("LMI is busy, Can't set clock gateing"); + 
return; } - WREG32(mmVCE_CLOCK_GATING_B, tmp); - orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp &= ~0x1fe000; - tmp &= ~0xff000000; - if (tmp != orig) - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); - - orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp &= ~0x3fc; - if (tmp != orig) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32(mmVCE_STATUS, 0); if (gated) WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ + if (gated) { + /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ + WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); + } else { + /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ + WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); + } + + /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/; + WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); + + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); + + WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); + if(!gated) { + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + mdelay(100); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + vce_v2_0_firmware_loaded(adev); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + } } static void vce_v2_0_disable_cg(struct amdgpu_device *adev) From 3ad81f167e86e3ad81f2611896d57165c62f8244 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 5 Aug 2016 17:30:17 +0800 Subject: [PATCH 141/171] drm/amdgpu: add need backup function V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: add a check in amdgpu_bo_create for whether a backup is needed. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 06a7cab3bcc7..6fa826846e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2376,6 +2376,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) /* Common functions */ int amdgpu_gpu_reset(struct amdgpu_device *adev); +bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c356bb538b43..21f4b748aff0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2049,6 +2049,14 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev) return 0; } +bool amdgpu_need_backup(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return false; + + return amdgpu_lockup_timeout > 0 ?
true : false; +} + /** * amdgpu_gpu_reset - reset the asic * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2c08e1b38656..60acbd22174e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -436,7 +436,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (r) return r; - if (flags & AMDGPU_GEM_CREATE_SHADOW) { + if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); if (r) amdgpu_bo_unref(bo_ptr); From e24db98529ab90387b4603ce580b04f7ad152e8a Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 15 Aug 2016 10:46:04 +0800 Subject: [PATCH 142/171] drm/amdgpu: add direct submission option for copy_buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 +++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 +- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 33e47a43ae32..345305235349 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, start_jiffies = jiffies; for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); + r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, + false); if (r) goto exit_do_move; r = fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 05a53f4fc334..b827c75e95de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 783a9703c160..bdc76e28a240 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -282,7 +282,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ - bo->resv, &fence); + bo->resv, &fence, false); if (r) return r; @@ -1143,7 +1143,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence) + struct fence **fence, bool direct_submit) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1187,12 +1187,21 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, ring, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); - if (r) - goto error_free; + if (direct_submit) { + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, + NULL, NULL, fence); + job->fence = fence_get(*fence); + if (r) + DRM_ERROR("Error scheduling IBs (%d)\n", r); + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); + if (r) + goto error_free; + } - return 0; + return r; error_free: amdgpu_job_free(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 8927e0e5012a..4d3927605309 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -68,7 +68,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence); + struct fence **fence, bool direct_submit); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct reservation_object *resv, From 20f4eff1c8ba344d5c22234ac5611ff1489fbea6 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 4 Aug 2016 16:51:18 +0800 Subject: [PATCH 143/171] drm/amdgpu: sync bo and shadow V3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the shadow flag to decide which direction to sync. V2: Don't need bo pin, so remove it. V3: 1. Split into two functions, one is backup_to_shadow, another is restore_from_shadow. 2. Clean up previous shadow direction definitions. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 64 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 12 ++++ 3 files changed, 76 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6fa826846e6d..1d6f077d605b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -438,12 +438,6 @@ struct amdgpu_bo_va { #define AMDGPU_GEM_DOMAIN_MAX 0x3 -enum amdgpu_bo_shadow { - AMDGPU_BO_SHADOW_TO_NONE = 0, - AMDGPU_BO_SHADOW_TO_PARENT, - AMDGPU_BO_SHADOW_TO_SHADOW, -}; - struct amdgpu_bo { /* Protected by gem.mutex */ struct list_head list; @@ -470,8 +464,6 @@ struct amdgpu_bo { struct drm_gem_object gem_base; struct amdgpu_bo *parent; struct amdgpu_bo *shadow; - /* indicate if need to sync between bo and shadow */ - enum amdgpu_bo_shadow backup_shadow; struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 60acbd22174e..84990415a3dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -445,6 +445,70 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; } +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, + amdgpu_bo_size(bo),
resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; +} + +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, + amdgpu_bo_size(bo), resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; +} + int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) { bool is_iomem; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index d650b42279e5..b6a27390ef88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -155,6 +155,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, bool direct); +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct); + /* * sub allocation From 6557e3d29469157f18342cd47e05792271e64e7c Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 15 Aug 2016 11:36:54 +0800 Subject: [PATCH 144/171] drm/amdgpu: update pd shadow while updating pd V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: Checking if shadow is valid. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 60 ++++++++++++++++++-------- 2 files changed, 42 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1d6f077d605b..60030f951152 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -816,6 +816,7 @@ struct amdgpu_ring { struct amdgpu_vm_pt { struct amdgpu_bo_list_entry entry; uint64_t addr; + uint64_t shadow_addr; }; struct amdgpu_vm { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7ca2e8fa5906..1eae307cdfd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -604,24 +604,14 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) return result; } -/** - * amdgpu_vm_update_pdes - make sure that page directory is valid - * - * @adev: amdgpu_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * - * Allocates new page tables if necessary - * and updates the page directory. - * Returns 0 for success, error for failure. 
- */ -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + bool shadow) { struct amdgpu_ring *ring; - struct amdgpu_bo *pd = vm->page_directory; - uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); + struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow : + vm->page_directory; + uint64_t pd_addr; uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; @@ -631,6 +621,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, int r; + if (!pd) + return 0; + pd_addr = amdgpu_bo_gpu_offset(pd); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. */ @@ -656,9 +649,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, continue; pt = amdgpu_bo_gpu_offset(bo); - if (vm->page_tables[pt_idx].addr == pt) - continue; - vm->page_tables[pt_idx].addr = pt; + if (!shadow) { + if (vm->page_tables[pt_idx].addr == pt) + continue; + vm->page_tables[pt_idx].addr = pt; + } else { + if (vm->page_tables[pt_idx].shadow_addr == pt) + continue; + vm->page_tables[pt_idx].shadow_addr = pt; + } pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || @@ -709,6 +708,29 @@ error_free: return r; } +/* + * amdgpu_vm_update_pdes - make sure that page directory is valid + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * + * Allocates new page tables if necessary + * and updates the page directory. + * Returns 0 for success, error for failure. + */ +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + int r; + + r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); + if (r) + return r; + return amdgpu_vm_update_pd_or_shadow(adev, vm, false); +} + /** * amdgpu_vm_update_ptes - make sure that page tables are valid * From 4c7e885506eaefc55c2b45293b52cae9ef797d67 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 15 Aug 2016 11:46:21 +0800 Subject: [PATCH 145/171] drm/amdgpu: update pt shadow while updating pt V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: move shadow parameter to amdgpu_pte_update_params. 
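With the flag carried in amdgpu_pte_update_params, the page-table write is simply issued twice, once per copy of the tables; as sketched from the diff below:

    params.shadow = true;    /* first pass: write the shadow page tables */
    amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
    params.shadow = false;   /* second pass: write the real page tables */
    amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);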
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Cc: minutemaidpark@hotmail.com Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1eae307cdfd4..d2824d97b7e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -65,6 +65,8 @@ struct amdgpu_pte_update_params { void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); + /* indicate update pt or its shadow */ + bool shadow; }; /** @@ -761,7 +763,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, addr = start; pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; - + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else @@ -780,6 +786,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, while (addr < end) { pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -1004,6 +1015,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; + params.shadow = true; + amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags); + params.shadow = false; amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags); amdgpu_ring_pad_ib(ring, params.ib); From 0c4e7fa56112b0eaef0bf88b569608620e0465b8 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 17 Aug 2016 11:41:30 +0800 Subject: [PATCH 146/171] drm/amdgpu: link all shadow bo V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: 1. use a mutex instead of a spinlock for the shadow list, since the code holding it can sleep. 2. move list_del to bo destroy phase.
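All accesses to the new list take the mutex, e.g. when a shadow BO is created (as in the diff below):

    mutex_lock(&adev->shadow_list_lock);
    list_add_tail(&bo->shadow_list, &adev->shadow_list);
    mutex_unlock(&adev->shadow_list_lock);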
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12 +++++++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 60030f951152..e30a0d6353cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -468,6 +468,7 @@ struct amdgpu_bo { struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; struct list_head mn_list; + struct list_head shadow_list; }; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) @@ -2096,6 +2097,10 @@ struct amdgpu_device { struct kfd_dev *kfd; struct amdgpu_virtualization virtualization; + + /* link all shadow bo */ + struct list_head shadow_list; + struct mutex shadow_list_lock; }; bool amdgpu_device_is_px(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 21f4b748aff0..76fd2dec7b42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1550,6 +1550,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); + INIT_LIST_HEAD(&adev->shadow_list); + mutex_init(&adev->shadow_list_lock); + adev->rmmio_base = pci_resource_start(adev->pdev, 5); adev->rmmio_size = pci_resource_len(adev->pdev, 5); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 84990415a3dc..9113fffbb8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -98,6 +98,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); + if (!list_empty(&bo->shadow_list)) { + mutex_lock(&bo->adev->shadow_list_lock); + list_del_init(&bo->shadow_list); + mutex_unlock(&bo->adev->shadow_list_lock); + } kfree(bo->metadata); kfree(bo); } @@ -315,6 +320,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, } bo->adev = adev; INIT_LIST_HEAD(&bo->list); + INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | @@ -407,8 +413,12 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, NULL, &placement, bo->tbo.resv, &bo->shadow); - if (!r) + if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&bo->shadow_list, &adev->shadow_list); + mutex_unlock(&adev->shadow_list_lock); + } return r; } From 53cdccd5200f5051460e778be11cc371474bc3b5 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 21 Jul 2016 17:20:52 +0800 Subject: [PATCH 147/171] drm/amdgpu: recover vram bo from shadow after gpu reset V2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2: 1. don't directly submit too many jobs at the same time. 2. delete unrelated printk.
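Point 1 is handled by keeping roughly one restore in flight: the recovery loop issues the copy for the next BO, then waits on the fence of the previous one before moving on; a sketch of the logic in the diff below:

    struct fence *fence = NULL, *next = NULL;

    list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
        amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
        if (fence)
            fence_wait(fence, false); /* throttle the direct submissions */
        fence_put(fence);
        fence = next;
    }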
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 62 ++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 76fd2dec7b42..c38dc47cd767 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2060,6 +2060,35 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) return amdgpu_lockup_timeout > 0 ? true : false; } +static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct fence **fence) +{ + uint32_t domain; + int r; + + if (!bo->shadow) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (r) + return r; + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + /* if bo has been evicted, then no need to recover */ + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + r = amdgpu_bo_restore_from_shadow(adev, ring, bo, + NULL, fence, true); + if (r) { + DRM_ERROR("recover page table failed!\n"); + goto err; + } + } +err: + amdgpu_bo_unreserve(bo); + return r; +} + /** * amdgpu_gpu_reset - reset the asic * @@ -2138,13 +2167,46 @@ retry: if (r) { dev_err(adev->dev, "ib ring test failed (%d).\n", r); r = amdgpu_suspend(adev); + need_full_reset = true; goto retry; } + /** + * recover vm page tables, since we cannot depend on VRAM being + * consistent after gpu full reset. + */ + if (need_full_reset && amdgpu_need_backup(adev)) { + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct amdgpu_bo *bo, *tmp; + struct fence *fence = NULL, *next = NULL; + DRM_INFO("recover vram bo from shadow\n"); + mutex_lock(&adev->shadow_list_lock); + list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { + amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); + if (fence) { + r = fence_wait(fence, false); + if (r) { + WARN(r, "recovery from shadow isn't completed\n"); + break; + } + } + + fence_put(fence); + fence = next; + } + mutex_unlock(&adev->shadow_list_lock); + if (fence) { + r = fence_wait(fence, false); + if (r) + WARN(r, "recovery from shadow isn't completed\n"); + } + fence_put(fence); } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; + amd_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } From 83a59b6338c71425f3159fc9ab31380f237af733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Wed, 17 Aug 2016 23:58:58 +0200 Subject: [PATCH 148/171] drm/amdgpu: add AMDGPU_INFO_NUM_EVICTIONS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For profiling. v2: really bump the minor version Signed-off-by: Marek Olšák Reviewed-by: Christian König Reviewed-by: Edward O'Callaghan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 +++ include/uapi/drm/amdgpu_drm.h | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 58b1db82f589..f5c99a008717 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -53,9 +53,10 @@ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. * - 3.3.0 - Add VM support for UVD on supported hardware. + * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
*/ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 3 +#define KMS_DRIVER_MINOR 4 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d942654a1de0..4c8e68a82c47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file case AMDGPU_INFO_NUM_BYTES_MOVED: ui64 = atomic64_read(&adev->num_bytes_moved); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; + case AMDGPU_INFO_NUM_EVICTIONS: + ui64 = atomic64_read(&adev->num_evictions); + return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: ui64 = atomic64_read(&adev->vram_usage); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 5aef0b71079b..ae2845fdcb5f 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -485,6 +485,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_DEV_INFO 0x16 /* visible vram usage */ #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 +/* number of TTM buffer evictions */ +#define AMDGPU_INFO_NUM_EVICTIONS 0x18 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff From 8cc5e80641ed5e3973bd44c170529a756742e990 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 18 Aug 2016 10:25:52 -0400 Subject: [PATCH 149/171] drm/amd/powerplay: Tidy up cz_hwmgr.c Clean up whitespace and formatting. Signed-off-by: Tom St Denis Reviewed-by: Edward O'Callaghan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 53 +++++-------------- 1 file changed, 13 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 40153847675a..b75fb123f370 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) int result; cz_hwmgr->gfx_ramp_step = 256*25/100; - cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) @@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; - cz_hwmgr->clock_slow_down_freq = 25000; - cz_hwmgr->skip_clock_slow_down = 1; - cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ - cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ - cz_hwmgr->voting_rights_clients = 0x00C00033; - cz_hwmgr->static_screen_threshold = 8; - cz_hwmgr->ddi_power_gating_disabled = 0; - cz_hwmgr->bapm_enabled = 1; - cz_hwmgr->voltage_drop_threshold = 0; - cz_hwmgr->gfx_power_gating_threshold = 500; - cz_hwmgr->vce_slow_sclk_threshold = 20000; - cz_hwmgr->dce_slow_sclk_threshold = 30000; - cz_hwmgr->disable_driver_thermal_policy = 1; - cz_hwmgr->disable_nb_ps3_in_battery = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -230,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicPatchPowerState); cz_hwmgr->thermal_auto_throttling_treshold = 0; - cz_hwmgr->tdr_clock = 0; - cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ 
-447,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; cz_hwmgr->boot_power_level.dsDividerIndex = 0; - cz_hwmgr->boot_power_level.ssDividerIndex = 0; - cz_hwmgr->boot_power_level.allowGnbSlow = 1; - cz_hwmgr->boot_power_level.forceNBPstate = 0; - cz_hwmgr->boot_power_level.hysteresis_up = 0; - cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; - cz_hwmgr->boot_power_level.display_wm = 0; - cz_hwmgr->boot_power_level.vce_wm = 0; return 0; @@ -746,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; clock = hwmgr->display_config.min_core_set_clock; -; if (clock == 0) printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n"); @@ -829,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetWatermarkFrequency, - cz_hwmgr->sclk_dpm.soft_max_clk); + cz_hwmgr->sclk_dpm.soft_max_clk); return 0; } @@ -855,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, PP_DBG_LOG("enabling ALL SMU features.\n"); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, - dpm_features); + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); if (ret == 0) cz_hwmgr->is_nb_dpm_enabled = true; } @@ -1399,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) PPSMC_MSG_SetUvdHardMin)); cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } else { cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } + } else { cz_enable_disable_uvd_dpm(hwmgr, false); + } return 0; } @@ -1687,13 +1664,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != - hw_data->cc6_settings.cpu_pstate_separation_time - || cc6_disable != - hw_data->cc6_settings.cpu_cc6_disable - || pstate_disable != - hw_data->cc6_settings.cpu_pstate_disable - || pstate_switch_disable != - hw_data->cc6_settings.nb_pstate_switch_disable) { + hw_data->cc6_settings.cpu_pstate_separation_time || + cc6_disable != hw_data->cc6_settings.cpu_cc6_disable || + pstate_disable != hw_data->cc6_settings.cpu_pstate_disable || + pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) { hw_data->cc6_settings.cc6_setting_changed = true; @@ -1796,8 +1770,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_PhwCzPowerState(state); level_index = index > ps->level - 1 ? ps->level - 1 : index; - - level->coreClock = ps->levels[level_index].engineClock; + level->coreClock = ps->levels[level_index].engineClock; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { From 1fdc0b76ea9b6f6a50aa76b7cb7a025f87c1319d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 17 Aug 2016 13:44:20 +0200 Subject: [PATCH 150/171] drm/amdgpu: fix lru size grouping v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding a BO can make it the insertion point for larger sizes as well. v2: add a comment about the guard structure. 
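The guard entry makes the propagation loop terminate: after updating its own bucket, amdgpu_ttm_lru_tail() walks into the larger buckets for as long as they still point at the old tail, and the NULL-initialized guard at the end of the array can never match (sketch from the diff below):

    struct list_head *res = lru->lru[tbo->mem.mem_type];

    lru->lru[tbo->mem.mem_type] = &tbo->lru;
    while ((++lru)->lru[tbo->mem.mem_type] == res)
        lru->lru[tbo->mem.mem_type] = &tbo->lru;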
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index bdc76e28a240..5447973483ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -946,6 +946,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->lru[tbo->mem.mem_type]; lru->lru[tbo->mem.mem_type] = &tbo->lru; + while ((++lru)->lru[tbo->mem.mem_type] == res) + lru->lru[tbo->mem.mem_type] = &tbo->lru; return res; } @@ -956,6 +958,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->swap_lru; lru->swap_lru = &tbo->swap; + while ((++lru)->swap_lru == res) + lru->swap_lru = &tbo->swap; return res; } @@ -1003,6 +1007,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) lru->swap_lru = &adev->mman.bdev.glob->swap_lru; } + for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) + adev->mman.guard.lru[j] = NULL; + adev->mman.guard.swap_lru = NULL; + adev->mman.initialized = true; r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->mc.real_vram_size >> PAGE_SHIFT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 4d3927605309..72f6bfc15d8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -61,6 +61,8 @@ struct amdgpu_mman { /* custom LRU management */ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; + /* guard for log2_size array, don't add anything in between */ + struct amdgpu_mman_lru guard; }; int amdgpu_copy_buffer(struct amdgpu_ring *ring, From faceaf6a731110f6b3c1a57f897a613d720d2156 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 15 Aug 2016 14:06:50 +0200 Subject: [PATCH 151/171] drm/amdgpu: cleanup amdgpu_ttm_placement_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it clearer what this function does. No intended functional change.
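After the cleanup every placement entry initializes fpfn, lpfn and flags explicitly, so all domain cases read the same way; the GTT case, for example, becomes:

    places[c].fpfn = 0;
    places[c].lpfn = 0;
    places[c].flags = TTM_PL_FLAG_TT;
    if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
        places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
    else
        places[c].flags |= TTM_PL_FLAG_CACHED;
    c++;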
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 114 +++++++++++---------- 1 file changed, 61 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9113fffbb8b9..04c376f27611 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -116,87 +116,95 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, struct ttm_placement *placement, - struct ttm_place *placements, + struct ttm_place *places, u32 domain, u64 flags) { u32 c = 0, i; - placement->placement = placements; - placement->busy_placement = placements; - if (domain & AMDGPU_GEM_DOMAIN_VRAM) { + unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && - adev->mc.visible_vram_size < adev->mc.real_vram_size) { - placements[c].fpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - placements[c++].flags = TTM_PL_FLAG_WC | + adev->mc.visible_vram_size < adev->mc.real_vram_size) { + places[c].fpfn = visible_pfn; + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + places[c].lpfn = visible_pfn; + else + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; + c++; } - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; - if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) - placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN; + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + places[c].lpfn = visible_pfn; + else + places[c].flags |= TTM_PL_FLAG_TOPDOWN; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GTT) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_TT; - } + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_TT; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED; + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_CPU) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_SYSTEM; - } + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_SYSTEM; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED; + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GDS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GDS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; + c++; } + if (domain & AMDGPU_GEM_DOMAIN_GWS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GWS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; + c++; } + if (domain & 
AMDGPU_GEM_DOMAIN_OA) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_OA; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; + c++; } if (!c) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + c++; } - placement->num_placement = c; - placement->num_busy_placement = c; - for (i = 0; i < c; i++) { - if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - (placements[i].flags & TTM_PL_FLAG_VRAM) && - !placements[i].fpfn) - placements[i].lpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - else - placements[i].lpfn = 0; - } + placement->num_placement = c; + placement->placement = places; + + placement->num_busy_placement = c; + placement->busy_placement = places; } void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) From 6369f6f133f8388dbb9aa4040b06c5ac32a2fe8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 15 Aug 2016 14:08:54 +0200 Subject: [PATCH 152/171] drm/amdgpu: stop adding dummy entry in amdgpu_ttm_placement_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AMDGPU_GEM_CREATE_NO_CPU_ACCESS and AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED are obviously mutually exclusive. So stop adding a dummy entry without effect when both are specified. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 04c376f27611..b17734e0ecc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -119,18 +119,16 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, struct ttm_place *places, u32 domain, u64 flags) { - u32 c = 0, i; + u32 c = 0; if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && + !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && adev->mc.visible_vram_size < adev->mc.real_vram_size) { places[c].fpfn = visible_pfn; - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - places[c].lpfn = visible_pfn; - else - places[c].lpfn = 0; + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; From 99e124f402d6d649498e2aa3cbcf4563a37fea0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 16 Aug 2016 14:43:17 +0200 Subject: [PATCH 153/171] drm/amdgpu: cleanup amdgpu_vm_bo_update params MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it more obvious what we are doing here.
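Callers now state their intent directly; roughly:

    r = amdgpu_vm_bo_update(adev, bo_va, false); /* fill from current placement */
    r = amdgpu_vm_bo_update(adev, bo_va, true);  /* clear the entries */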
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 21 +++++++++++---------- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e30a0d6353cd..3cc2629eb158 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -928,7 +928,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem); + bool clear); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo); struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 396a412d70d4..d80e5d3a4add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -642,7 +642,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2824d97b7e5..bf56f1814437 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1117,28 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object - * @mem: ttm mem + * @clear: if true clear the entries * * Fill in the page table entries for @bo_va. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem) + bool clear) { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct ttm_mem_reg *mem; struct fence *exclusive; uint64_t addr; int r; - if (mem) { + if (clear) { + mem = NULL; + addr = 0; + exclusive = NULL; + } else { struct ttm_dma_tt *ttm; + mem = &bo_va->bo->tbo.mem; addr = (u64)mem->start << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_TT: @@ -1156,9 +1160,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); - } else { - addr = 0; - exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1189,7 +1190,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); - if (!mem) + if (clear) list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); @@ -1252,7 +1253,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, true); if (r) return r; From dfaf2291ab6073e33068eabaa3f3d6b8632bc81c Mon Sep 17 00:00:00 2001 From: Alexandre Demers Date: Sun, 21 Aug 2016 20:38:41 -0400 Subject: [PATCH 154/171] drm/amdgpu: Use correct mask in dce_v8_0_afmt_setmode() and fix comment typos. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We were using the same mask twice. Looking at radeon, it seems we should be using HDMI_AVI_INFO_CONT instead as the second mask. While there, fix typos in comments and improve readability. I haven't looked at the other DCEs; the mask may be wrong for them as well. Reviewed-by: Edward O'Callaghan Reviewed-by: Christian König Signed-off-by: Alexandre Demers Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index efca1b972813..86a5fa0cccd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1739,6 +1739,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; + offset = dig->afmt->offset; /* hdmi deep color mode general control packets setup, if bpc > 8 */ @@ -1863,7 +1864,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset, HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */ - HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */ + HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset, (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */ @@ -1872,13 +1873,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset, AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */ - /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001); - /* enable audio after to setting up hw */ + /* enable audio after setting up hw */ dce_v8_0_audio_enable(adev, dig->afmt->pin, true); } From 5da25cd5754b97088f2f33bdc811f379b5b3e9ae Mon Sep 17 00:00:00 2001 From: Alexandre Demers Date: Sun, 21 Aug 2016 20:38:26 -0400 Subject: [PATCH 155/171] drm/amdgpu: Fix indentation in dce_v8_0_audio_write_sad_regs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed indentation for readability.
Reviewed-by: Christian König Reviewed-by: Edward O'Callaghan Signed-off-by: Alexandre Demers Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 86a5fa0cccd5..845a30b03cb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1547,13 +1547,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->format == eld_reg_to_type[i][1]) { if (sad->channels > max_channels) { - value = (sad->channels << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | - (sad->byte2 << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | - (sad->freq << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); - max_channels = sad->channels; + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); + max_channels = sad->channels; } if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) From 5c0eb98e484067c3492352bb7f504653302cf5f2 Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 20:06:32 +0200 Subject: [PATCH 156/171] drm/amdgpu/gmc7: remove dead code (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In an if block for (running == 0) running cannot be non-zero. v2: agd: remove unused variable Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index bca50e590fdb..aa0c4b964621 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -183,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -203,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -239,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; From 887656f0b7da0fdbb22740907c53cbc4aefee0ef Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 20:10:41 +0200 Subject: [PATCH 157/171] drm/amdgpu/gmc8: remove dead code (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In an if block for (running == 0) running cannot be non-zero. 
v2: agd: remove unused variable Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 3cbbf53c600b..8e7127f09ff6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -253,7 +253,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -279,11 +279,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -315,9 +310,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; From d36f3e048e1e81906c776760d70e06bce366ef65 Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 20:21:27 +0200 Subject: [PATCH 158/171] drm/amd/powerplay: avoid NULL pointer dereference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A check of the form if (a != NULL || a->b != NULL) dereferences a NULL pointer when a == NULL; the conditions must be combined with &&. Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 12d3333b9d12..2de34a5a85c2 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -163,12 +163,12 @@ static int pp_hw_fini(void *handle) pp_handle = (struct pp_instance *)handle; eventmgr = pp_handle->eventmgr; - if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL) + if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL) eventmgr->pp_eventmgr_fini(eventmgr); smumgr = pp_handle->smu_mgr; - if (smumgr != NULL || smumgr->smumgr_funcs != NULL || + if (smumgr != NULL && smumgr->smumgr_funcs != NULL && smumgr->smumgr_funcs->smu_fini != NULL) smumgr->smumgr_funcs->smu_fini(smumgr); From bb1800caf773eb8d819e0964c7efd96efe435fa1 Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 20:27:02 +0200 Subject: [PATCH 159/171] drm/amd/powerplay: avoid NULL dereference, cz_hwmgr.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A check of the form if (a != NULL || a->b != NULL) dereferences a NULL pointer when a == NULL; the conditions must be combined with &&.
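Using the placeholders from the message, the broken and fixed forms are:

    if (a != NULL || a->b != NULL)  /* wrong: a->b is evaluated when a == NULL */
            a->b(a);

    if (a != NULL && a->b != NULL)  /* correct: && short-circuits on a == NULL */
            a->b(a);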
Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index b75fb123f370..5ecef1732e20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1218,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr != NULL || hwmgr->backend != NULL) { + if (hwmgr != NULL && hwmgr->backend != NULL) { kfree(hwmgr->backend); kfree(hwmgr); } From 6e4b070ea16bf95994824cf598ceaf50ea033256 Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 21:41:49 +0200 Subject: [PATCH 160/171] drm/radeon/cik: remove dead code (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In an if block for (running == 0) running cannot be non-zero. v2: agd: remove unused variable Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0c1b9ff433af..a0427f0adbab 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -1871,7 +1871,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 running, blackout = 0, tmp; + u32 running, tmp; u32 *io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1912,11 +1912,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1964,9 +1959,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; From ddbbd3be96792665d1106f14cef8f1a11e81ecc3 Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Sun, 21 Aug 2016 22:36:29 +0200 Subject: [PATCH 161/171] drm/radeon: remove dead code, si_mc_load_microcode (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In an if block for (running == 0) running cannot be non-zero. 
v2: agd: remove unused variable Reviewed-by: Christian König Signed-off-by: Heinrich Schuchardt Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/si.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 2523ca96c6c7..ae5ee74e69a3 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1547,7 +1547,7 @@ int si_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 running, blackout = 0; + u32 running; u32 *io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1598,11 +1598,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1641,9 +1636,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; From 1b5836492de6a753f6bb420f500848761c6f84f3 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 22 Aug 2016 10:54:28 -0400 Subject: [PATCH 162/171] drm/amd/amdgpu: Print ring name in amdgpu_ib_schedule() If the ring isn't ready lets print out which ring name to help debugging. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index a31d7ef3032c..f5810f700668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -142,7 +142,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } if (!ring->ready) { - dev_err(adev->dev, "couldn't schedule ib\n"); + dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } From c4795ca642b8bd76b5b6ffba41ba909543273d43 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Aug 2016 16:31:36 -0400 Subject: [PATCH 163/171] drm/amdgpu: fix IB alignment for UVD According to the hw team, it should be 16, not 8. Cc: Peter Fang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4c8e68a82c47..eab8d7313244 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -292,7 +292,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; From a22f803cd22c3aac780757df712aebb79e1778c1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Aug 2016 10:44:16 -0400 Subject: [PATCH 164/171] drm/amdgpu: fix VCE ib alignment value The VCE rings only require single dword alignment. 
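Together with the previous UVD fix, the practical rule for submitters is to round the IB length up to the alignment the kernel reports through the hardware IP info query; a stand-alone sketch with a hypothetical helper (not a libdrm API):

    #include <stdio.h>

    /* Round an IB length in dwords up to the reported alignment. */
    static unsigned align_ib_dw(unsigned len_dw, unsigned align_dw)
    {
        return (len_dw + align_dw - 1) / align_dw * align_dw;
    }

    int main(void)
    {
        printf("UVD: %u -> %u dwords\n", 70u, align_ib_dw(70, 16)); /* pads to 80 */
        printf("VCE: %u -> %u dwords\n", 70u, align_ib_dw(70, 1));  /* unchanged */
        return 0;
    }
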
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index eab8d7313244..b78e74048f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -299,7 +299,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 1; break; default: return -EINVAL; From 8dd31d74acc1371d143fd0b2795dc3d16fb47202 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Aug 2016 17:58:14 -0400 Subject: [PATCH 165/171] drm/amdgpu: add support for UVD_NO_OP register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writes to this register are the preferred way to do NOPs. Bump the driver version as well. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 1 + drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f5c99a008717..a631c954c4e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -54,9 +54,10 @@ * at the end of IBs. * - 3.3.0 - Add VM support for UVD on supported hardware. * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS. + * - 3.5.0 - Add support for new UVD_NO_OP register. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 4 +#define KMS_DRIVER_MINOR 5 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index bf59354d788a..811fe985acdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -818,6 +818,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, return r; break; case mmUVD_ENGINE_CNTL: + case mmUVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", reg); diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h index f3e53b118361..19802e96417e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h @@ -34,6 +34,7 @@ #define mmUVD_UDEC_ADDR_CONFIG 0x3bd3 #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_SEMA_CNTL 0x3d00 #define mmUVD_LMI_EXT40_ADDR 0x3d26 #define mmUVD_CTX_INDEX 0x3d28 From 4d6bdbad25e60eac1d14c039cbc32778e9dec8fe Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Aug 2016 18:03:22 -0400 Subject: [PATCH 166/171] drm/radeon: add support for UVD_NO_OP register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writes to this register are the preferred way to do NOPs. Bump the driver version as well. 
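A NOP built this way is a two-dword type-0 packet: a header addressing UVD_NO_OP followed by one payload dword the hardware ignores. The stand-alone sketch below approximates the driver's PACKET0() header layout (register dword offset in the low 16 bits, extra-dword count in bits 16-29); check vid.h for the authoritative macro:

    #include <stdio.h>

    #define mmUVD_NO_OP 0x3bff   /* added by this series */

    /* Approximation of the driver's PACKET0() encoding. */
    static unsigned packet0(unsigned reg, unsigned n)
    {
        return (0u << 30) | (reg & 0xFFFFu) | ((n & 0x3FFFu) << 16);
    }

    int main(void)
    {
        /* Header plus one don't-care payload dword. */
        unsigned nop[2] = { packet0(mmUVD_NO_OP, 0), 0 };

        printf("0x%08x 0x%08x\n", nop[0], nop[1]);
        return 0;
    }
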
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600d.h | 1 + drivers/gpu/drm/radeon/radeon_drv.c | 3 ++- drivers/gpu/drm/radeon/radeon_uvd.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 1e8495cca41e..2e00a5287bd2 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1490,6 +1490,7 @@ #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 #define UVD_ENGINE_CNTL 0xef18 +#define UVD_NO_OP 0xeffc #define UVD_SEMA_CNTL 0xf400 #define UVD_RB_ARB_CTRL 0xf480 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c01a7c6abb49..8faa4d55d060 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -94,9 +94,10 @@ * 2.44.0 - SET_APPEND_CNT packet3 support * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI * 2.46.0 - Add PFP_SYNC_ME support on evergreen + * 2.47.0 - Add UVD_NO_OP register support */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 46 +#define KMS_DRIVER_MINOR 47 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 73dfe01435ea..1ae1f139f14e 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -669,6 +669,7 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, return r; break; case UVD_ENGINE_CNTL: + case UVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", From c8b4f288f4276078f169a404e2d1ac1535f82b93 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Aug 2016 09:12:21 -0400 Subject: [PATCH 167/171] drm/amdgpu: switch UVD code to use UVD_NO_OP for padding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace packet2's with packet0 writes to UVD_NO_OP. The value written to UVD_NO_OP does not matter. 
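The change amounts to filling dwords 6-15 of the 16-dword message with register-write pairs instead of one type-2 packet per dword; sketched stand-alone below with approximate packet macros (see the driver headers for the real encodings):

    #include <stdio.h>

    #define mmUVD_NO_OP 0x3bff   /* added by this series */

    /* Approximations of the driver's packet macros. */
    #define PACKET2(v)      ((2u << 30) | ((v) & 0x3FFFFFFFu))
    #define PACKET0(reg, n) ((0u << 30) | ((reg) & 0xFFFFu) | (((n) & 0x3FFFu) << 16))

    int main(void)
    {
        unsigned ib[16] = { 0 };
        int i;

        /* Old padding: one type-2 NOP packet per dword. */
        for (i = 6; i < 16; ++i)
            ib[i] = PACKET2(0);

        /* New padding: a write to UVD_NO_OP plus one payload dword
         * whose value the hardware ignores. */
        for (i = 6; i < 16; i += 2) {
            ib[i] = PACKET0(mmUVD_NO_OP, 0);
            ib[i + 1] = 0;
        }

        for (i = 6; i < 16; ++i)
            printf("ib[%d] = 0x%08x\n", i, ib[i]);
        return 0;
    }
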
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +- drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h | 1 + drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 1 + 6 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 811fe985acdf..af408b6c6e05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -957,8 +957,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->ptr[3] = addr >> 32; ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); ib->ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib->ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0); + ib->ptr[i+1] = 0; + } ib->length_dw = 16; if (direct) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 132e613ed674..10c0407dcb6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 101de136ba63..8513376062c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7b7e82840c95..2abe8a93c99f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h index eb4cf53427da..cc972d237a7e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h @@ -34,6 +34,7 @@ #define mmUVD_UDEC_ADDR_CONFIG 0x3bd3 #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index ec69869c55ff..378f4b6b43da 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -35,6 +35,7 @@ #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 #define mmUVD_POWER_STATUS_U 0x3bfd 
+#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 From 70a033d25b197b0a4e60509911195613cf28b57e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Aug 2016 10:07:28 -0400 Subject: [PATCH 168/171] drm/radeon: switch UVD code to use UVD_NO_OP for padding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace packet2's with packet0 writes to UVD_NO_OP. The value written to UVD_NO_OP does not matter. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 2 +- drivers/gpu/drm/radeon/cikd.h | 1 + drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/evergreend.h | 1 + drivers/gpu/drm/radeon/ni.c | 2 +- drivers/gpu/drm/radeon/nid.h | 1 + drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/radeon_uvd.c | 6 ++++-- drivers/gpu/drm/radeon/rv770.c | 2 +- drivers/gpu/drm/radeon/rv770d.h | 1 + drivers/gpu/drm/radeon/si.c | 2 +- drivers/gpu/drm/radeon/sid.h | 1 + 12 files changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a0427f0adbab..b1784a1b482a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8207,7 +8207,7 @@ static void cik_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index cead2284fd79..48db93577c1d 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2069,6 +2069,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_LMI_EXT40_ADDR 0xf498 #define UVD_GP_SCRATCH4 0xf4e0 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f95db0c1a17a..0b6b5766216f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5579,7 +5579,7 @@ static void evergreen_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index c8e3d394cde7..f3d88ca2aa8f 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1523,6 +1523,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 4a3d7cab83f7..103fc8650197 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2062,7 +2062,7 @@ static void cayman_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, 
ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 47eb49b77d32..3c9fec88ea44 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -1137,6 +1137,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9247e7d207fe..640653606f86 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3097,7 +3097,7 @@ static void r600_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1ae1f139f14e..0cd0e7bdee55 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -754,8 +754,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.ptr[3] = addr >> 32; ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0); ib.ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib.ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib.ptr[i] = PACKET0(UVD_NO_OP, 0); + ib.ptr[i+1] = 0; + } ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL, false); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 1c120a4c3c97..729ae588c970 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1738,7 +1738,7 @@ static void rv770_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9ef2064b1c9c..0271f4c559ae 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -387,6 +387,7 @@ #define UVD_UDEC_TILING_CONFIG 0xef40 #define UVD_UDEC_DB_TILING_CONFIG 0xef44 #define UVD_UDEC_DBW_TILING_CONFIG 0xef48 +#define UVD_NO_OP 0xeffc #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ae5ee74e69a3..7ee9aafbdf74 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6920,7 +6920,7 @@ static void si_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index d1a7b58dd291..eb220eecba78 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1559,6 +1559,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define 
UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc From 39a5830a05a1b599192346e832eb9fe4ff4f2164 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 22 Aug 2016 20:49:36 +0800 Subject: [PATCH 169/171] drm/amd/powerplay: delete useless code in iceland_hwmgr.c. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c index f48f3eac5ba4..8a7ada50551c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -2445,8 +2445,6 @@ static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) /* set pcieDpmLevel to mid_pcie_level_enabled*/ data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; - for (i = 0; i < dpm_table->sclk_table.count; i++) - /* level count will send to smc once at init smc table and never change*/ result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); From ba0b2275a6781b2f3919d931d63329b5548f6d5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 23 Aug 2016 11:00:17 +0200 Subject: [PATCH 170/171] drm/amdgpu: use memcpy_to/fromio for UVD fw upload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also use the firmware size from the header instead of calculating it. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index af408b6c6e05..cc766cc53a87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -298,7 +298,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.saved_bo) return -ENOMEM; - memcpy(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.saved_bo, ptr, size); return 0; } @@ -315,7 +315,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.cpu_addr; if (adev->uvd.saved_bo != NULL) { - memcpy(ptr, adev->uvd.saved_bo, size); + memcpy_toio(ptr, adev->uvd.saved_bo, size); kfree(adev->uvd.saved_bo); adev->uvd.saved_bo = NULL; } else { @@ -324,11 +324,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); + memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + memset_io(ptr, 0, size); } return 0; From 7b4d3e297e8a7d3b82e68231ff077e891c370349 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 23 Aug 2016 11:18:59 +0200 Subject: [PATCH 171/171] drm/amdgpu: use memcpy_toio for VCE firmware upload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to be clean here, even when it's a noop on x86. 
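The VCPU BO is mapped like I/O memory, so the _toio/_fromio/_io helpers are the portable accessors even though they degenerate to plain copies on x86. The stand-alone sketch below mirrors the UVD resume path from the previous patch, with ordinary arrays standing in for the __iomem mapping and the kernel accessors noted in comments (struct abbreviated; real code also applies le32_to_cpu() to the header fields):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Abbreviated stand-in for the driver's firmware header. */
    struct common_firmware_header {
        uint32_t ucode_size_bytes;
        uint32_t ucode_array_offset_bytes;
    };

    int main(void)
    {
        uint8_t fw_blob[64] = { 0 };  /* request_firmware() data */
        uint8_t vram[32];             /* the __iomem BO mapping */
        const struct common_firmware_header hdr = { 16, 8 };

        /* Copy exactly the ucode payload the header describes
         * (kernel: memcpy_toio), rather than fw->size - offset... */
        memcpy(vram, fw_blob + hdr.ucode_array_offset_bytes,
               hdr.ucode_size_bytes);

        /* ...then clear the remainder of the mapping
         * (kernel: memset_io). */
        memset(vram + hdr.ucode_size_bytes, 0,
               sizeof(vram) - hdr.ucode_size_bytes);

        puts("firmware image staged");
        return 0;
    }
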
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 05865ce35351..da52af2a935a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -282,8 +282,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(cpu_addr, (adev->vce.fw->data) + offset, - (adev->vce.fw->size) - offset); + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); amdgpu_bo_kunmap(adev->vce.vcpu_bo);