linux/drivers/gpu/drm/i915/display/intel_frontbuffer.h
José Roberto de Souza ef39826c12 drm/i915/display: Fix glitches when moving cursor with PSR2 selective fetch enabled
Legacy cursor APIs are handled by intel_legacy_cursor_update(), which
calls drm_atomic_helper_update_plane() when going through the
slow/atomic path to update the cursor, which was the path taken when
PSR2 selective fetch was enabled.

drm_atomic_helper_update_plane() sets
drm_atomic_state->legacy_cursor_update to true when updating the
cursor plane, to allow several cursor updates to happen within the
same frame, as userspace expects. If drivers waited for a vblank
increment at the end of every cursor movement, the cursor would
visibly lag.
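
A minimal sketch of that fast-path contract (illustrative only, not
the exact i915 code):

	/* in a driver's atomic commit tail */
	if (!state->legacy_cursor_update)
		drm_atomic_helper_wait_for_vblanks(state->dev, state);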

But this optimization does not work properly with the PSR2 selective
fetch dirty area calculation. For example, if the cursor moved three
times within a single frame, the final dirty area programmed into
PSR2_MAN_TRK_CTL would be computed with the second movement as the old
state and the third movement as the new state, never updating the area
where the cursor sat after the first movement.
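
Conceptually the dirty area needs the union of every rectangle touched
in the frame, not just the last old/new pair; a hypothetical helper to
illustrate (not actual driver code):

	static void union_damage(struct drm_rect *total,
				 const struct drm_rect *r)
	{
		total->x1 = min(total->x1, r->x1);
		total->y1 = min(total->y1, r->y1);
		total->x2 = max(total->x2, r->x2);
		total->y2 = max(total->y2, r->y2);
	}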

So switch back to the fast-path approach in
intel_legacy_cursor_update() and handle cursor movements as frontbuffer
rendering (psr_force_hw_tracking_exit()). That is not optimal for
power savings, but it is the solution we have until mailbox-style
updates are implemented.
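
In terms of the API in this header, the fast path conceptually issues
something like the following on every cursor move (sketch only;
looking up the plane's intel_frontbuffer is omitted):

	intel_frontbuffer_flush(front, ORIGIN_CURSOR_UPDATE);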

Also remove the cursor workaround: the issue is now properly
understood, and it is known that the workaround will never cover all
cases.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210930001409.254817-5-jose.souza@intel.com
2021-09-30 15:04:44 -07:00

/*
 * Copyright (c) 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include <linux/atomic.h>
#include <linux/kref.h>

#include "gem/i915_gem_object_types.h"
#include "i915_active.h"

struct drm_i915_private;

/* Which kind of operation dirtied or flushed the frontbuffer. */
enum fb_op_origin {
	ORIGIN_CPU = 0,		/* direct CPU rendering */
	ORIGIN_CS,		/* GPU rendering via the command streamer */
	ORIGIN_FLIP,		/* page flip */
	ORIGIN_DIRTYFB,		/* DIRTYFB ioctl */
	ORIGIN_CURSOR_UPDATE,	/* legacy cursor fast path */
};

struct intel_frontbuffer {
	struct kref ref;			/* reference count */
	atomic_t bits;				/* frontbuffer bits owned by this object */
	struct i915_active write;		/* tracks in-flight GPU writes */
	struct drm_i915_gem_object *obj;	/* backing GEM object */
	struct rcu_head rcu;			/* deferred free under RCU */
};

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);

void intel_frontbuffer_put(struct intel_frontbuffer *front);

static inline struct intel_frontbuffer *
__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		/*
		 * The frontbuffer may be freed concurrently; only take a
		 * reference if its refcount has not already hit zero.
		 */
		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		/*
		 * Recheck that obj->frontbuffer still points at what we
		 * grabbed; if it was swapped under us, drop and retry.
		 */
		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}
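
/*
 * Example usage (a minimal sketch, not part of the i915 API): take a
 * reference that remains valid outside the RCU read-side section and
 * drop it when done.
 */
static inline bool example_obj_has_frontbuffer(struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front = __intel_frontbuffer_get(obj);

	if (!front)
		return false;

	/* front->bits may be inspected safely while the reference is held. */
	intel_frontbuffer_put(front);
	return true;
}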

struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: frontbuffer object to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 *
 * Return: true if any frontbuffer bits were invalidated.
 */
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}
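
/*
 * Example usage (a minimal sketch, not part of the i915 API): mark the
 * start of GPU rendering. With ORIGIN_CS the matching flush is deferred
 * until the request completes or the plane is flipped, per the comment
 * above.
 */
static inline void example_gpu_render_begin(struct intel_frontbuffer *front)
{
	intel_frontbuffer_invalidate(front, ORIGIN_CS);
}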

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: frontbuffer object to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_flush(front, origin, frontbuffer_bits);
}
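
/*
 * Example usage (a minimal sketch, not part of the i915 API): bracket
 * direct CPU rendering with an invalidate/flush pair so consumers such
 * as FBC, DRRS and PSR observe the frontbuffer activity.
 */
static inline void example_cpu_render(struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front = __intel_frontbuffer_get(obj);

	if (!front)
		return;

	intel_frontbuffer_invalidate(front, ORIGIN_CPU);
	/* ... write pixels through a CPU mapping ... */
	intel_frontbuffer_flush(front, ORIGIN_CPU);

	intel_frontbuffer_put(front);
}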

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);
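
/*
 * Example usage (a minimal sketch, not part of the i915 API): when an
 * atomic plane update switches framebuffers, move the plane's bits from
 * the old buffer's tracker to the new one so later invalidates and
 * flushes are attributed correctly.
 */
static inline void example_swap_plane_fb(struct intel_frontbuffer *old_front,
					 struct intel_frontbuffer *new_front,
					 unsigned int frontbuffer_bits)
{
	intel_frontbuffer_track(old_front, new_front, frontbuffer_bits);
}
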
#endif /* __INTEL_FRONTBUFFER_H__ */