drm/radeon: add spinlocks for indirect register access
This adds spinlocks to protect access to the other indirect register apertures. These indirect spaces are used pretty infrequently and we haven't had any reported problems, but better safe than sorry.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 0a5b7b0bd9
parent fe78118c46
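For context, these "indirect" register apertures are reached through an INDEX/DATA register pair: software writes the target offset to the INDEX register and then reads or writes the DATA register. The two MMIO accesses only make sense as a pair, so a concurrent accessor (including one running in interrupt context) that reprograms INDEX in between silently redirects the DATA access. The sketch below is a minimal illustration of the pattern this patch applies, not radeon code; the foo_* names, register offsets, and readl()/writel() helpers are hypothetical stand-ins for the driver's WREG32()/RREG32() macros and the new per-aperture locks.

/* Minimal sketch of a spinlock-protected indirect register accessor.
 * FOO_INDEX/FOO_DATA, struct foo_device and foo_idx_lock are hypothetical.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define FOO_INDEX 0x100   /* hypothetical index register offset */
#define FOO_DATA  0x104   /* hypothetical data register offset */

struct foo_device {
        void __iomem *mmio;
        spinlock_t foo_idx_lock;        /* serializes the INDEX/DATA pair */
};

static void foo_device_init_locks(struct foo_device *fdev)
{
        /* done once at probe time, mirroring radeon_device_init() below */
        spin_lock_init(&fdev->foo_idx_lock);
}

static u32 foo_indirect_read(struct foo_device *fdev, u32 reg)
{
        unsigned long flags;
        u32 val;

        /* Without the lock, another thread or an IRQ handler could rewrite
         * FOO_INDEX between these two accesses and the DATA read would
         * return the contents of the wrong aperture slot. */
        spin_lock_irqsave(&fdev->foo_idx_lock, flags);
        writel(reg, fdev->mmio + FOO_INDEX);
        val = readl(fdev->mmio + FOO_DATA);
        spin_unlock_irqrestore(&fdev->foo_idx_lock, flags);

        return val;
}

The _irqsave variant is used throughout the patch, presumably because some of these accessors can be reached from atomic or interrupt context; disabling local interrupts while the lock is held also keeps an IRQ handler on the same CPU from deadlocking against it.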
@@ -122,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
  */
 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
         WREG32(PCIE_INDEX, reg);
         (void)RREG32(PCIE_INDEX);
         r = RREG32(PCIE_DATA);
+        spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
         return r;
 }

 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
         WREG32(PCIE_INDEX, reg);
         (void)RREG32(PCIE_INDEX);
         WREG32(PCIE_DATA, v);
         (void)RREG32(PCIE_DATA);
+        spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }

 static const u32 spectre_rlc_save_restore_register_list[] =
@@ -28,22 +28,30 @@
 static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
                               u32 block_offset, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->end_idx_lock, flags);
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
         r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+        spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
         return r;
 }

 static void dce6_endpoint_wreg(struct radeon_device *rdev,
                                u32 block_offset, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->end_idx_lock, flags);
         if (ASIC_IS_DCE8(rdev))
                 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
         else
                 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
                        AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
         WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+        spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
 }

 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)

 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t data;

+        spin_lock_irqsave(&rdev->pll_idx_lock, flags);
         WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
         r100_pll_errata_after_index(rdev);
         data = RREG32(RADEON_CLOCK_CNTL_DATA);
         r100_pll_errata_after_data(rdev);
+        spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
         return data;
 }

 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pll_idx_lock, flags);
         WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
         r100_pll_errata_after_index(rdev);
         WREG32(RADEON_CLOCK_CNTL_DATA, v);
         r100_pll_errata_after_data(rdev);
+        spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }

 static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)

 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
         r = RREG32(R_0001FC_MC_IND_DATA);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
         return r;
 }

 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
         WREG32(R_0001FC_MC_IND_DATA, v);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void r420_debugfs(struct radeon_device *rdev)
@@ -1045,20 +1045,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)

 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t r;

+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
         r = RREG32(R_0028FC_MC_DATA);
         WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
         return r;
 }

 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
                S_0028F8_MC_IND_WR_EN(1));
         WREG32(R_0028FC_MC_DATA, v);
         WREG32(R_0028F8_MC_INDEX, 0x7F);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2099,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
  */
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
         (void)RREG32(PCIE_PORT_INDEX);
         r = RREG32(PCIE_PORT_DATA);
+        spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
         return r;
 }

 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
         WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
         (void)RREG32(PCIE_PORT_INDEX);
         WREG32(PCIE_PORT_DATA, (v));
         (void)RREG32(PCIE_PORT_DATA);
+        spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }

 /*
@@ -2112,6 +2112,26 @@ struct radeon_device {
         spinlock_t mmio_idx_lock;
         /* protects concurrent SMC based register access */
         spinlock_t smc_idx_lock;
+        /* protects concurrent PLL register access */
+        spinlock_t pll_idx_lock;
+        /* protects concurrent MC register access */
+        spinlock_t mc_idx_lock;
+        /* protects concurrent PCIE register access */
+        spinlock_t pcie_idx_lock;
+        /* protects concurrent PCIE_PORT register access */
+        spinlock_t pciep_idx_lock;
+        /* protects concurrent PIF register access */
+        spinlock_t pif_idx_lock;
+        /* protects concurrent CG register access */
+        spinlock_t cg_idx_lock;
+        /* protects concurrent UVD register access */
+        spinlock_t uvd_idx_lock;
+        /* protects concurrent RCU register access */
+        spinlock_t rcu_idx_lock;
+        /* protects concurrent DIDT register access */
+        spinlock_t didt_idx_lock;
+        /* protects concurrent ENDPOINT (audio) register access */
+        spinlock_t end_idx_lock;
         void __iomem *rmmio;
         radeon_rreg_t mc_rreg;
         radeon_wreg_t mc_wreg;
@@ -2279,17 +2299,24 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
  */
 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t r;

+        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
         WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
         r = RREG32(RADEON_PCIE_DATA);
+        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
         return r;
 }

 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
         WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
         WREG32(RADEON_PCIE_DATA, (v));
+        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 }

 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
@@ -2316,93 +2343,135 @@ static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)

 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
         WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
         r = RREG32(R600_RCU_DATA);
+        spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
         return r;
 }

 static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
         WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
         WREG32(R600_RCU_DATA, (v));
+        spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
 }

 static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->cg_idx_lock, flags);
         WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
         r = RREG32(EVERGREEN_CG_IND_DATA);
+        spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
         return r;
 }

 static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->cg_idx_lock, flags);
         WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
         WREG32(EVERGREEN_CG_IND_DATA, (v));
+        spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
 }

 static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->pif_idx_lock, flags);
         WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
         r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+        spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
         return r;
 }

 static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pif_idx_lock, flags);
         WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
         WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+        spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }

 static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->pif_idx_lock, flags);
         WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
         r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+        spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
         return r;
 }

 static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->pif_idx_lock, flags);
         WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
         WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+        spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }

 static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
         WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
         r = RREG32(R600_UVD_CTX_DATA);
+        spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
         return r;
 }

 static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
         WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
         WREG32(R600_UVD_CTX_DATA, (v));
+        spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
 }


 static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
 {
+        unsigned long flags;
         u32 r;

+        spin_lock_irqsave(&rdev->didt_idx_lock, flags);
         WREG32(CIK_DIDT_IND_INDEX, (reg));
         r = RREG32(CIK_DIDT_IND_DATA);
+        spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
         return r;
 }

 static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->didt_idx_lock, flags);
         WREG32(CIK_DIDT_IND_INDEX, (reg));
         WREG32(CIK_DIDT_IND_DATA, (v));
+        spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
 }

 void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -1250,6 +1250,16 @@ int radeon_device_init(struct radeon_device *rdev,
         /* TODO: block userspace mapping of io register */
         spin_lock_init(&rdev->mmio_idx_lock);
         spin_lock_init(&rdev->smc_idx_lock);
+        spin_lock_init(&rdev->pll_idx_lock);
+        spin_lock_init(&rdev->mc_idx_lock);
+        spin_lock_init(&rdev->pcie_idx_lock);
+        spin_lock_init(&rdev->pciep_idx_lock);
+        spin_lock_init(&rdev->pif_idx_lock);
+        spin_lock_init(&rdev->cg_idx_lock);
+        spin_lock_init(&rdev->uvd_idx_lock);
+        spin_lock_init(&rdev->rcu_idx_lock);
+        spin_lock_init(&rdev->didt_idx_lock);
+        spin_lock_init(&rdev->end_idx_lock);
         if (rdev->family >= CHIP_BONAIRE) {
                 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
                 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)

 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t r;

+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(RS480_NB_MC_INDEX, reg & 0xff);
         r = RREG32(RS480_NB_MC_DATA);
         WREG32(RS480_NB_MC_INDEX, 0xff);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
         return r;
 }

 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
         WREG32(RS480_NB_MC_DATA, (v));
         WREG32(RS480_NB_MC_INDEX, 0xff);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 #if defined(CONFIG_DEBUG_FS)
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)

 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
+        u32 r;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
                 S_000070_MC_IND_CITF_ARB0(1));
-        return RREG32(R_000074_MC_IND_DATA);
+        r = RREG32(R_000074_MC_IND_DATA);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+        return r;
 }

 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
                 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
         WREG32(R_000074_MC_IND_DATA, v);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void rs600_debugfs(struct radeon_device *rdev)
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)

 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t r;

+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
         r = RREG32(R_00007C_MC_DATA);
         WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
         return r;
 }

 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
                 S_000078_MC_IND_WR_EN(1));
         WREG32(R_00007C_MC_DATA, v);
         WREG32(R_000078_MC_INDEX, 0x7F);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void rs690_mc_program(struct radeon_device *rdev)
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)

 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
         uint32_t r;

+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
         r = RREG32(MC_IND_DATA);
         WREG32(MC_IND_INDEX, 0);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
         return r;
 }

 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
         WREG32(MC_IND_DATA, (v));
         WREG32(MC_IND_INDEX, 0);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 #if defined(CONFIG_DEBUG_FS)