drm/amdgpu: apply nbio7 for Raven (v3)
nbio handles misc bus IO operations. Handle the differences between the nbio bus versions.

v2: switch checks from RAVEN to APU (Alex)
    squash in raven rev id fetch
    squash in fix for uninitialized hdp flush reg index for raven
v3: add some missed RAVEN to APU checks (Alex)

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 954d5d437f
commit aecbe64f2b
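The change applies one pattern throughout gmc_v9_0, sdma_v4_0, soc15 and vega10_ih: check adev->flags & AMD_IS_APU at runtime and dispatch to the nbio v7.0 helpers on Raven (an APU), or to the nbio v6.1 helpers otherwise. Below is a minimal, self-contained sketch of that dispatch idea; the struct and the two memsize stubs are illustrative stand-ins, not the driver's actual definitions. Checking the APU flag rather than a specific chip id (the v2 note above) keeps the dispatch generic for APUs.

```c
/* Illustrative sketch only: stand-in types and stub callbacks,
 * not the amdgpu driver code itself. */
#include <stdio.h>
#include <stdint.h>

#define AMD_IS_APU (1U << 0)                 /* stand-in for the driver's APU flag   */

struct amdgpu_device { uint32_t flags; };    /* stand-in for struct amdgpu_device    */

/* Stubs standing in for the nbio 6.1 (dGPU) and nbio 7.0 (Raven APU) helpers. */
static uint32_t nbio_v6_1_get_memsize(struct amdgpu_device *adev) { (void)adev; return 8192; }
static uint32_t nbio_v7_0_get_memsize(struct amdgpu_device *adev) { (void)adev; return 1024; }

/* The pattern this commit applies: nbio 7.0 on APUs, nbio 6.1 otherwise. */
static uint32_t get_config_memsize(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_get_memsize(adev);
	else
		return nbio_v6_1_get_memsize(adev);
}

int main(void)
{
	struct amdgpu_device apu  = { .flags = AMD_IS_APU };
	struct amdgpu_device dgpu = { .flags = 0 };

	printf("APU memsize:  %u MB\n", get_config_memsize(&apu));
	printf("dGPU memsize: %u MB\n", get_config_memsize(&dgpu));
	return 0;
}
```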
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -33,6 +33,7 @@
 #include "soc15_common.h"
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
@@ -215,7 +216,10 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 	unsigned i, j;
 
 	/* flush hdp cache */
-	nbio_v6_1_hdp_flush(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_hdp_flush(adev);
+	else
+		nbio_v6_1_hdp_flush(adev);
 
 	spin_lock(&adev->mc.invalidate_lock);
 
@@ -479,7 +483,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 	/* size in MB on si */
 	adev->mc.mc_vram_size =
-		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
+		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
+		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
 	adev->mc.real_vram_size = adev->mc.mc_vram_size;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
@@ -718,7 +723,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		return r;
 
 	/* After HDP is initialized, flush HDP.*/
-	nbio_v6_1_hdp_flush(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_hdp_flush(adev);
+	else
+		nbio_v6_1_hdp_flush(adev);
 
 	r = gfxhub_v1_0_gart_enable(adev);
 	if (r)
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -386,7 +386,9 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask = 0;
 	struct nbio_hdp_flush_reg *nbio_hf_reg;
 
-	if (ring->adev->asic_type == CHIP_VEGA10)
+	if (ring->adev->flags & AMD_IS_APU)
+		nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+	else
 		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
 
 	if (ring == &ring->adev->sdma.instance[0].ring)
@@ -617,7 +619,10 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
 	}
 	WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
 	WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-	nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+	else
+		nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
 
 	if (amdgpu_sriov_vf(adev))
 		sdma_v4_0_ring_set_wptr(ring);
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -104,10 +104,10 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+	if (adev->flags & AMD_IS_APU)
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -125,10 +125,10 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags, address, data;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+	if (adev->flags & AMD_IS_APU)
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -199,7 +199,10 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-	return nbio_v6_1_get_memsize(adev);
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_memsize(adev);
+	else
+		return nbio_v6_1_get_memsize(adev);
 }
 
 static const u32 vega10_golden_init[] =
@@ -376,7 +379,10 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
+		u32 memsize = (adev->flags & AMD_IS_APU) ?
+			nbio_v7_0_get_memsize(adev) :
+			nbio_v6_1_get_memsize(adev);
+		if (memsize != 0xffffffff)
 			break;
 		udelay(1);
 	}
@@ -450,8 +456,12 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
 					   bool enable)
 {
-	nbio_v6_1_enable_doorbell_aperture(adev, enable);
-	nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
+	if (adev->flags & AMD_IS_APU) {
+		nbio_v7_0_enable_doorbell_aperture(adev, enable);
+	} else {
+		nbio_v6_1_enable_doorbell_aperture(adev, enable);
+		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
+	}
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -506,7 +516,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-	return nbio_v6_1_get_rev_id(adev);
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_rev_id(adev);
+	else
+		return nbio_v6_1_get_rev_id(adev);
 }
 
 
@@ -557,6 +570,9 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_VEGA10:
 		nbio_v6_1_init(adev);
 		break;
+	case CHIP_RAVEN:
+		nbio_v7_0_init(adev);
+		break;
 	default:
 		return -EINVAL;
 	}
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -25,6 +25,7 @@
 #define __SOC15_H__
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
 
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -97,7 +97,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	vega10_ih_disable_interrupts(adev);
 
-	nbio_v6_1_ih_control(adev);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_control(adev);
+	else
+		nbio_v6_1_ih_control(adev);
 
 	ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -148,7 +151,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 					   ENABLE, 0);
 	}
 	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
-	nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+	else
+		nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
 
 	tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,