drm/amdgpu: remove duplicate amdgpu_fence_process implementation
Looks like that somehow got missed while porting the radeon changes.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent 02bc0650bc
commit 68ed3de434
@@ -294,65 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-	unsigned long irqflags;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	spin_lock_irqsave(&ring->fence_lock, irqflags);
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq[ring->idx];
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (wake)
-		wake_up_all(&ring->fence_drv.fence_queue);
-	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
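The subtle part of the removed implementation is how each 32-bit hardware fence read is widened into the driver's 64-bit sequence space, as the deleted comment block describes; presumably the same logic now lives in amdgpu_fence_activity(), which the retained one-liner calls. Below is a minimal, standalone C sketch of just that widening-and-acceptance step. The helper names (hw_read_seq32, widen_seq) and the sample values are inventions for this illustration, not amdgpu symbols; only the bit manipulation mirrors the removed code.

/* Standalone illustration (not driver code): widening a 32-bit hardware
 * fence value into a 64-bit sequence space, as in the removed loop. */
#include <stdint.h>
#include <stdio.h>

/* Pretend hardware register: only the low 32 bits of the fence are visible. */
static uint32_t hw_read_seq32(uint64_t real_seq)
{
	return (uint32_t)real_seq;
}

/* Widen a 32-bit read using the last known 64-bit value; if the result
 * went backwards, the counter wrapped, so borrow the upper 32 bits from
 * the last emitted sequence instead. */
static uint64_t widen_seq(uint32_t seq32, uint64_t last_seq, uint64_t last_emitted)
{
	uint64_t seq = (uint64_t)seq32 | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq)
		seq = (uint64_t)seq32 | (last_emitted & 0xffffffff00000000ULL);

	return seq;
}

int main(void)
{
	uint64_t last_seq = 0xfffffff0ULL;      /* last value seen by the driver */
	uint64_t last_emitted = 0x100000005ULL; /* highest sequence handed to the HW */
	uint64_t real_seq = 0x100000002ULL;     /* what the HW has actually reached */

	uint64_t seq = widen_seq(hw_read_seq32(real_seq), last_seq, last_emitted);

	/* Only accept values that advance but do not pass what was emitted. */
	if (seq > last_seq && seq <= last_emitted)
		printf("fence advanced to %#llx\n", (unsigned long long)seq);

	return 0;
}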