drm/radeon: add error handling to fence_wait_empty_locked
Instead of returning the error, handle it directly, and while at it fix the comments about the ring lock.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
commit 7ecc45e3ef
parent 49099c4991
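
For context before the diff, here is a minimal, standalone sketch of what the change means for callers. Nothing below is kernel code: wait_seq(), gpu_reset(), wait_empty_old() and wait_empty_new() are invented stand-ins, and only the calling convention mirrors this commit, which turns radeon_fence_wait_empty_locked() from an int-returning function that every caller had to check into a void function that retries after a successful GPU reset and logs any remaining error itself.

#include <errno.h>
#include <stdio.h>

/* stand-in for radeon_fence_wait_seq(): report one simulated lockup */
static int wait_seq(int *lockups_left)
{
	if (*lockups_left > 0) {
		(*lockups_left)--;
		return -EDEADLK;	/* lockup detected */
	}
	return 0;			/* fence signaled */
}

/* stand-in for radeon_gpu_reset(): pretend the reset always succeeds */
static int gpu_reset(void)
{
	return 0;
}

/* old shape: the error is returned, so every caller must handle -EDEADLK */
static int wait_empty_old(int *lockups_left)
{
	return wait_seq(lockups_left);
}

/* new shape: void, retry after a successful reset, log anything else */
static void wait_empty_new(int *lockups_left)
{
	while (1) {
		int r = wait_seq(lockups_left);

		if (r == -EDEADLK) {
			r = gpu_reset();
			if (!r)
				continue;	/* reset worked, wait again */
		}
		if (r)
			fprintf(stderr, "error waiting for ring to become idle (%d)\n", r);
		return;
	}
}

int main(void)
{
	int lockups = 1;

	/* old convention: the caller owns the lockup handling */
	if (wait_empty_old(&lockups) == -EDEADLK)
		fprintf(stderr, "caller has to reset the GPU itself\n");

	/* new convention: just call it, errors are handled internally */
	lockups = 1;
	wait_empty_new(&lockups);
	return 0;
}

In the real function the retry additionally drops and re-takes rdev->ring_lock around radeon_gpu_reset(), as the diff below shows.
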
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
 			  struct radeon_fence **fences,
 			  bool intr);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/* caller must hold ring lock */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
@@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
+}
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)