drm/radeon: Don't generate new fence for page flip.
Use the fence of the new frontbuffer, if any. Generating a new fence
could cause us to wait for completely unrelated rendering to finish
before performing the flip.

Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 5762a179b6
commit fcc485d667
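The heart of the change: instead of creating and emitting a brand-new fence for every flip, the flip work item now borrows the fence already attached to the new front buffer's buffer object, and waits on nothing at all if that buffer is idle. As a purely illustrative user-space sketch (struct fence, struct buffer, fence_for_flip and flip_ready are stand-ins, not driver symbols):

/*
 * Illustrative sketch only, not driver code: the flip path should wait on
 * whatever fence already covers rendering to the *new* front buffer, and on
 * nothing at all if that buffer is idle. All types and names are stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fence { bool signaled; };            /* stand-in for struct radeon_fence */
struct buffer { struct fence *sync_obj; };  /* stand-in for rbo->tbo.sync_obj   */

/*
 * Before the patch a fresh fence was created and emitted for every flip, so
 * the flip waited for all rendering queued so far, related or not.  After the
 * patch the flip simply borrows the buffer's existing fence (the driver also
 * takes a reference with radeon_fence_ref(); omitted here).
 */
static struct fence *fence_for_flip(struct buffer *new_front)
{
	return new_front->sync_obj;  /* may be NULL: the buffer is already idle */
}

/*
 * The vblank handler then only defers the flip when there really is an
 * unsignaled fence, mirroring the new
 * "work->fence && !radeon_fence_signaled(work->fence)" test below.
 */
static bool flip_ready(const struct fence *f)
{
	return f == NULL || f->signaled;
}

int main(void)
{
	struct buffer idle = { .sync_obj = NULL };
	struct fence pending = { .signaled = false };
	struct buffer busy = { .sync_obj = &pending };

	printf("idle buffer flips immediately: %d\n", flip_ready(fence_for_flip(&idle)));
	printf("busy buffer must wait:         %d\n", !flip_ready(fence_for_flip(&busy)));
	return 0;
}

In the driver itself the borrowed fence is rbo->tbo.sync_obj, and the vblank handler's updated check tolerates a NULL work->fence, as shown in the diff below.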
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 	work = radeon_crtc->unpin_work;
 	if (work == NULL ||
-	    !radeon_fence_signaled(work->fence)) {
+	    (work->fence && !radeon_fence_signaled(work->fence))) {
 		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 		return;
 	}
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	struct radeon_framebuffer *new_radeon_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
-	struct radeon_fence *fence;
 	struct radeon_unpin_work *work;
 	unsigned long flags;
 	u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	r = radeon_fence_create(rdev, &fence);
-	if (unlikely(r != 0)) {
-		kfree(work);
-		DRM_ERROR("flip queue: failed to create fence.\n");
-		return -ENOMEM;
-	}
 	work->event = event;
 	work->rdev = rdev;
 	work->crtc_id = radeon_crtc->crtc_id;
-	work->fence = radeon_fence_ref(fence);
 	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	drm_gem_object_reference(obj);
 	rbo = gem_to_radeon_bo(obj);
 	work->old_rbo = rbo;
+	obj = new_radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	if (rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
 	/* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* pin the new buffer */
-	obj = new_radeon_fb->obj;
-	rbo = gem_to_radeon_bo(obj);
-
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
 			 work->old_rbo, rbo);
 
@@ -461,25 +454,11 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		goto pflip_cleanup1;
 	}
 
-	/* 32 ought to cover us */
-	r = radeon_ring_lock(rdev, 32);
-	if (r) {
-		DRM_ERROR("failed to lock the ring before flip\n");
-		goto pflip_cleanup2;
-	}
-
-	/* emit the fence */
-	radeon_fence_emit(rdev, fence);
 	/* set the proper interrupt */
 	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
-	/* fire the ring */
-	radeon_ring_unlock_commit(rdev);
 
 	return 0;
 
-pflip_cleanup2:
-	drm_vblank_put(dev, radeon_crtc->crtc_id);
-
 pflip_cleanup1:
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0)) {
@@ -501,7 +480,7 @@ pflip_cleanup:
 unlock_free:
 	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
-	radeon_fence_unref(&fence);
+	radeon_fence_unref(&work->fence);
 	kfree(work);
 
 	return r;