Mirror of https://github.com/torvalds/linux.git
drm/radeon: Prevent too early kms-pageflips triggered by vblank.
Since 3.16-rc1 we have this new failure: When the userspace XOrg ddx schedules vblank events to trigger deferred kms-pageflips, e.g., via the OML_sync_control extension call glXSwapBuffersMscOML(), or if a glXSwapBuffers() is called immediately after completion of a previous swapbuffers call, e.g., in a tight rendering loop with minimal rendering, it happens frequently that the pageflip ioctl() is executed within the same vblank in which a previous kms-pageflip completed, or - for deferred swaps - one vblank earlier than requested by the client app.

This causes premature pageflips and detection of failure by the ddx, e.g., XOrg log warnings like...

"(WW) RADEON(1): radeon_dri2_flip_event_handler: Pageflip completion event has impossible msc 201025 < target_msc 201026"

... and error/invalid return values of glXWaitForSbcOML() and the Intel_swap_events extension.

The reason is the new way in which kms-pageflips are programmed since 3.16. This commit changes the time window in which the hw can execute pending programmed pageflips. Before, a pending flip would get executed anywhere within the vblank interval. Now a pending flip only gets executed at the leading edge of vblank (start of front porch), making sure that an invocation of the pageflip ioctl() within a given vblank interval will only lead to pageflip completion in the following vblank.

Tested to death on a DCE-4 card.

Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 201bb62402
commit f53f81b257
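To make the register-level intent easier to follow, here is a minimal, self-contained C sketch of the change described above and realized in the diff below. It is not driver code: MMIO is mocked with a plain array, WREG32/RREG32 are stand-ins for the radeon driver's real accessors, and the helper names program_flip_timing/restore_flip_timing are illustrative only. The two points it mirrors from the diff are: MASTER_UPDATE_MODE is written with 3 instead of 0 so a pending flip latches only at the start of vblank (front porch), and the mc_resume paths now test the full 3-bit field (mask 0x7) and force it back to 3 rather than merely clearing the low bits.

/* Sketch only, assuming mocked MMIO; offsets mirror evergreen_reg.h. */
#include <stdint.h>
#include <stdio.h>

#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8

static uint32_t mock_mmio[0x10000 / 4];                 /* fake register file */

static void WREG32(uint32_t reg, uint32_t v) { mock_mmio[reg / 4] = v; }
static uint32_t RREG32(uint32_t reg)         { return mock_mmio[reg / 4]; }

/* Old behaviour: 0 = execute a pending flip anywhere in the vblank interval.
 * New behaviour: 3 = execute it only at the leading edge of vblank (start of
 * front porch), so a flip queued during vblank N completes in vblank N+1. */
static void program_flip_timing(uint32_t crtc_offset)
{
	WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offset, 3);
}

/* Resume path: check the whole 3-bit field and force it back to 3,
 * matching the evergreen_mc_resume()/rv515_mc_resume() hunks below. */
static void restore_flip_timing(uint32_t crtc_offset)
{
	uint32_t tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offset);

	if ((tmp & 0x7) != 3) {
		tmp &= ~0x7;
		tmp |= 0x3;
		WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offset, tmp);
	}
}

int main(void)
{
	program_flip_timing(0);
	mock_mmio[EVERGREEN_MASTER_UPDATE_MODE / 4] = 0;    /* pretend it was reset */
	restore_flip_timing(0);
	printf("MASTER_UPDATE_MODE = %u\n", RREG32(EVERGREEN_MASTER_UPDATE_MODE));
	return 0;
}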
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
 	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
 	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);