drm/amdgpu:cleanup force_completion
cleanups, now only operate on the given ring

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d1f6dc1a9a
commit 2f9d4084ca
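At a glance, the interface change this patch makes (taken from the header hunk at the end of this diff); a sketch for orientation, not part of the patch itself:

	/* before this patch: a per-device helper plus a separate per-ring variant */
	void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
	void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);

	/* after this patch: a single helper that operates on the given ring */
	void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);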
@@ -2872,7 +2872,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 		amd_sched_hw_job_reset(&ring->sched);
 
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-		amdgpu_fence_driver_force_completion_ring(ring);
+		amdgpu_fence_driver_force_completion(ring);
 	}
 
 	/* request to take full control of GPU before re-initialization */
@@ -2991,9 +2991,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 			continue;
 		kthread_park(ring->sched.thread);
 		amd_sched_hw_job_reset(&ring->sched);
-	}
 
-	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-	amdgpu_fence_driver_force_completion(adev);
+		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+		amdgpu_fence_driver_force_completion(ring);
+	}
 
 	need_full_reset = amdgpu_need_full_reset(adev);
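For readability, roughly how the scheduler-block loop in amdgpu_gpu_reset() reads after the hunk above. The loop header and the ring checks are assumptions inferred from the hunk context and the removed per-device helper further down, so treat this as a sketch rather than the exact source:

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];	/* assumed loop header */

		if (!ring || !ring->sched.thread)		/* assumed check */
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}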
@@ -499,7 +499,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
@@ -534,7 +534,7 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* delay GPU reset to resume */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 
 		/* disable the interrupt */
@@ -571,30 +571,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
- * @adev: amdgpu device pointer
+ * @ring: fence of the ring to signal
  *
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
  */
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
 {
-	int i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring || !ring->fence_drv.initialized)
-			continue;
-
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
-	}
-}
-
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
-{
-	if (ring)
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+	amdgpu_fence_process(ring);
 }
 
 /*
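With the per-device variant gone, a caller that still needs the old "signal every ring" behaviour iterates the rings itself, as the reset paths above now do. A minimal sketch under that assumption; the helper name force_complete_all_rings is hypothetical, and the loop simply mirrors the removed function body:

	/* hypothetical caller-side helper, not part of this patch */
	static void force_complete_all_rings(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->fence_drv.initialized)
				continue;

			/* writes sync_seq as the last signaled fence, then processes it */
			amdgpu_fence_driver_force_completion(ring);
		}
	}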
@@ -79,8 +79,7 @@ struct amdgpu_fence_driver
 
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 				  unsigned num_hw_submission);