drm/amdgpu: cleanup GMC v9 TLB invalidation
Move the kiq handling into amdgpu_virt.c and drop the fallback.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5c76c6a897
commit af5fe1e96a
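For orientation, a minimal sketch (not part of the diff itself) of how the newly exported helper is intended to be called from an SR-IOV TLB flush path; hub, eng, vmid and tmp stand in for the values already computed in gmc_v9_0_flush_gpu_tlb() in the hunks below:

/*
 * Illustrative sketch only, not part of this commit: flushing one VM hub's
 * TLB through the KIQ instead of direct MMIO access. hub, eng, vmid and
 * tmp are assumed to be set up exactly as in gmc_v9_0_flush_gpu_tlb().
 */
uint32_t req = hub->vm_inv_eng0_req + eng;   /* invalidate request register */
uint32_t ack = hub->vm_inv_eng0_ack + eng;   /* matching acknowledge register */

/* emit the register write and poll the per-VMID ack bit via the KIQ ring */
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, 1 << vmid);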
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -132,6 +132,46 @@ failed_kiq_write:
 	pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+					    ref, mask);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for IRQ context */
+	if (r < 1 && in_interrupt())
+		goto failed_kiq;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return;
+
+failed_kiq:
+	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @amdgpu: amdgpu device.
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -278,6 +278,9 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t rreg1,
+					uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 	return req;
 }
 
-static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-						 uint32_t reg0, uint32_t reg1,
-						 uint32_t ref, uint32_t mask)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-	spin_lock_irqsave(&kiq->ring_lock, flags);
-
-	amdgpu_ring_alloc(ring, 32);
-	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-					    ref, mask);
-	amdgpu_fence_emit_polling(ring, &seq);
-	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-	/* don't wait anymore for IRQ context */
-	if (r < 1 && in_interrupt())
-		goto failed_kiq;
-
-	might_sleep();
-
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-	}
-
-	if (cnt > MAX_KIQ_REG_TRY)
-		goto failed_kiq;
-
-	return 0;
-
-failed_kiq:
-	pr_err("failed to invalidate tlb with kiq\n");
-	return r;
-}
-
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -375,7 +333,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 {
 	const unsigned eng = 17;
 	unsigned i, j;
-	int r;
 
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -384,10 +341,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 		if (adev->gfx.kiq.ring.sched.ready &&
 		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
 		    !adev->in_gpu_reset) {
-			r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
-				hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
-			if (!r)
-				continue;
+			uint32_t req = hub->vm_inv_eng0_req + eng;
+			uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+							   1 << vmid);
+			continue;
 		}
 
 		spin_lock(&adev->gmc.invalidate_lock);