drm/radeon: drop radeon_ring_force_activity

The reason for the false positives was fixed quite some time ago, and since
most engines can still execute NOPs even while locked up, forcing activity
leads to false negatives.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König
Date:   2014-02-18 12:37:50 +01:00
Parent: ff212f25fe
Commit: 2d2fe3f9b6

13 changed files with 1 addition and 60 deletions
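
As context for the hunks below, here is a minimal, purely illustrative sketch
of the shape every per-engine *_is_lockup() callback takes after this change.
The function name and the reset-mask parameter are hypothetical placeholders;
only radeon_ring_lockup_update() and radeon_ring_test_lockup() are the real
helpers touched by the diff:

/* Illustrative sketch, not literal driver code: the asic-specific
 * reset_mask computation is omitted and the function name is made up. */
static bool example_engine_is_lockup(struct radeon_device *rdev,
				     struct radeon_ring *ring,
				     u32 reset_mask)
{
	if (!(reset_mask & RADEON_RESET_GFX)) {
		/* Engine not flagged for reset: refresh the lockup
		 * tracking state and report no lockup. */
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	/* No forced NOP activity any more; rely solely on whether the
	 * ring's read pointer has advanced since the last update. */
	return radeon_ring_test_lockup(rdev, ring);
}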

@@ -5118,8 +5118,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -714,8 +714,6 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -3953,8 +3953,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -177,8 +177,6 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -1920,8 +1920,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -303,8 +303,6 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -2525,8 +2525,6 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -1751,8 +1751,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -222,8 +222,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -956,7 +956,6 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp)
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
 void radeon_ring_lockup_update(struct radeon_device *rdev,
                                struct radeon_ring *ring);
 bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);

@@ -478,28 +478,6 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
 	mutex_unlock(&rdev->ring_lock);
 }
-/**
- * radeon_ring_force_activity - add some nop packets to the ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Add some nop packets to the ring to force activity (all asics).
- * Used for lockup detection to see if the rptr is advancing.
- */
-void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	int r;
-	if (radeon_ring_get_rptr(rdev, ring) == ring->wptr) {
-		r = radeon_ring_alloc(rdev, ring, 1);
-		if (!r) {
-			radeon_ring_write(ring, ring->nop);
-			radeon_ring_commit(rdev, ring);
-		}
-	}
-}
 /**
  * radeon_ring_lockup_update - update lockup variables
  *
@@ -519,21 +497,7 @@ void radeon_ring_lockup_update(struct radeon_device *rdev,
  * @rdev: radeon device structure
  * @ring: radeon_ring structure holding ring information
  *
- * We don't need to initialize the lockup tracking information as we will either
- * have CP rptr to a different value of jiffies wrap around which will force
- * initialization of the lockup tracking informations.
- *
- * A possible false positivie is if we get call after while and last_cp_rptr ==
- * the current CP rptr, even if it's unlikely it might happen. To avoid this
- * if the elapsed time since last call is bigger than 2 second than we return
- * false and update the tracking information. Due to this the caller must call
- * radeon_ring_test_lockup several time in less than 2sec for lockup to be reported
- * the fencing code should be cautious about that.
- *
- * Caller should write to the ring to force CP to do something so we don't get
- * false positive when CP is just gived nothing to do.
- *
- **/
+ */
 bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);

@@ -3869,8 +3869,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }

@@ -52,8 +52,6 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force ring activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }