0ef1905ecf
Our current global state handling is pretty ad-hoc. Let's make it better by imitating the standard drm core private object approach. The reason we don't want to use the private objects directly is locking: each private object has its own lock, so if we introduce any global private objects we get serialized by that single lock across all pipes. The global state approach instead uses read/write style locking, where each individual crtc lock counts as a read lock, and grabbing all the crtc locks allows write access.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200120174728.21095-15-ville.syrjala@linux.intel.com
Reviewed-by: Imre Deak <imre.deak@intel.com>
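As a minimal illustration of the locking scheme described above, here is a sketch of how an ->atomic_check() style caller might use the API implemented in the file below. The wrapper example_global_check() and the parameter example_obj are hypothetical; only the intel_atomic_*() functions are taken from this file.

/* Hypothetical sketch; only the intel_atomic_*() calls exist in this file. */
static int example_global_check(struct intel_atomic_state *state,
                                struct intel_global_obj *example_obj)
{
        struct intel_global_state *obj_state;
        int ret;

        /* Read access: holding any single crtc lock is enough. */
        obj_state = intel_atomic_get_global_obj_state(state, example_obj);
        if (IS_ERR(obj_state))
                return PTR_ERR(obj_state);

        /*
         * Write access: grab every crtc lock so no other commit can
         * read the global state concurrently. This also flags the
         * state as changed, so intel_atomic_swap_global_state() will
         * swap it in at commit time.
         */
        ret = intel_atomic_lock_global_state(obj_state);
        if (ret)
                return ret;

        return 0;
}

intel_atomic_serialize_global_state() (at the bottom of the file) is the stricter variant: instead of merely locking every crtc it pulls every crtc state into the commit via intel_atomic_get_crtc_state(), which orders later commits on any pipe after this one.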
224 lines
5.4 KiB
C
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string.h>

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
                                  struct intel_global_obj *obj,
                                  struct intel_global_state *state,
                                  const struct intel_global_state_funcs *funcs)
{
        memset(obj, 0, sizeof(*obj));

        obj->state = state;
        obj->funcs = funcs;
        list_add_tail(&obj->head, &dev_priv->global_obj_list);
}

void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
{
        struct intel_global_obj *obj, *next;

        list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
                list_del(&obj->head);
                obj->funcs->atomic_destroy_state(obj, obj->state);
        }
}

static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc)
                drm_modeset_lock_assert_held(&crtc->base.mutex);
}

static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
                                 struct drm_modeset_lock *lock)
{
        struct drm_modeset_lock *l;

        list_for_each_entry(l, &ctx->locked, head) {
                if (lock == l)
                        return true;
        }

        return false;
}

static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
        struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (modeset_lock_is_held(ctx, &crtc->base.mutex))
                        return;
        }

        WARN(1, "Global state not read locked\n");
}

struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
                                  struct intel_global_obj *obj)
{
        int index, num_objs, i;
        size_t size;
        struct __intel_global_objs_state *arr;
        struct intel_global_state *obj_state;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].state;

        assert_global_state_read_locked(state);

        num_objs = state->num_global_objs + 1;
        size = sizeof(*state->global_objs) * num_objs;
        arr = krealloc(state->global_objs, size, GFP_KERNEL);
        if (!arr)
                return ERR_PTR(-ENOMEM);

        state->global_objs = arr;
        index = state->num_global_objs;
        memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

        obj_state = obj->funcs->atomic_duplicate_state(obj);
        if (!obj_state)
                return ERR_PTR(-ENOMEM);

        obj_state->changed = false;

        state->global_objs[index].state = obj_state;
        state->global_objs[index].old_state = obj->state;
        state->global_objs[index].new_state = obj_state;
        state->global_objs[index].ptr = obj;
        obj_state->state = state;

        state->num_global_objs = num_objs;

        DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
                         obj, obj_state, state);

        return obj_state;
}

struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
                                      struct intel_global_obj *obj)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].old_state;

        return NULL;
}

struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
                                      struct intel_global_obj *obj)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++)
                if (obj == state->global_objs[i].ptr)
                        return state->global_objs[i].new_state;

        return NULL;
}

void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_global_state *old_obj_state, *new_obj_state;
        struct intel_global_obj *obj;
        int i;

        for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
                                            new_obj_state, i) {
                WARN_ON(obj->state != old_obj_state);

                /*
                 * If the new state wasn't modified (and properly
                 * locked for write access) we throw it away.
                 */
                if (!new_obj_state->changed)
                        continue;

                assert_global_state_write_locked(dev_priv);

                old_obj_state->state = state;
                new_obj_state->state = NULL;

                state->global_objs[i].state = old_obj_state;
                obj->state = new_obj_state;
        }
}

void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
        int i;

        for (i = 0; i < state->num_global_objs; i++) {
                struct intel_global_obj *obj = state->global_objs[i].ptr;

                obj->funcs->atomic_destroy_state(obj,
                                                 state->global_objs[i].state);
                state->global_objs[i].ptr = NULL;
                state->global_objs[i].state = NULL;
                state->global_objs[i].old_state = NULL;
                state->global_objs[i].new_state = NULL;
        }
        state->num_global_objs = 0;
}

int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
        struct intel_atomic_state *state = obj_state->state;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                int ret;

                ret = drm_modeset_lock(&crtc->base.mutex,
                                       state->base.acquire_ctx);
                if (ret)
                        return ret;
        }

        obj_state->changed = true;

        return 0;
}

int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
        struct intel_atomic_state *state = obj_state->state;
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        obj_state->changed = true;

        return 0;
}
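For context, here is a hedged sketch of how a driver-side user might hook a global object into this machinery. Everything named example_* is hypothetical, and the layout assumes struct intel_global_state is embedded as the first member of the driver state (the intel_global_state.h definitions are not part of this file); only the vfunc names atomic_duplicate_state / atomic_destroy_state and the core entry points are taken from the code above.

/* Hypothetical sketch; struct example_global_state and its vfuncs are illustrative only. */
struct example_global_state {
        struct intel_global_state base;
        unsigned int some_global_value;
};

static struct intel_global_state *
example_duplicate_state(struct intel_global_obj *obj)
{
        struct example_global_state *state;

        /* Copy the whole driver state; "base" is assumed to be the first member. */
        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        return &state->base;
}

static void example_destroy_state(struct intel_global_obj *obj,
                                  struct intel_global_state *state)
{
        kfree(state);
}

static const struct intel_global_state_funcs example_funcs = {
        .atomic_duplicate_state = example_duplicate_state,
        .atomic_destroy_state = example_destroy_state,
};

static int example_global_init(struct drm_i915_private *dev_priv,
                               struct intel_global_obj *obj)
{
        struct example_global_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        /* Register the object and its initial state with the global obj list. */
        intel_atomic_global_obj_init(dev_priv, obj, &state->base,
                                     &example_funcs);
        return 0;
}

On a successful commit, intel_atomic_swap_global_state() makes the duplicated state the new obj->state, and intel_atomic_global_obj_cleanup() destroys whatever state is current at teardown.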