drm/gem: Add LRU/shrinker helper

Add a simple LRU helper to assist with a driver's shrinker implementation.
It handles tracking the number of backing pages associated with a given
LRU, and provides a helper to implement shrinker_scan.

A driver can use multiple LRU instances to track objects in various
states, for example a dontneed LRU for purgeable objects, a willneed LRU
for evictable objects, and an unpinned LRU for objects without backing
pages.

All LRUs that the object can be moved between must share a single lock.
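
As a rough usage sketch (the foo_* names below are hypothetical and not
part of this patch), a driver could keep one mutex and hang all of its
LRUs off it, then move objects between states with a single call:

#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <drm/drm_gem.h>

/* Illustrative only: hypothetical "foo" driver. */
struct foo_device {
        struct shrinker shrinker;
        struct mutex lru_lock;          /* single lock shared by all LRUs */
        struct drm_gem_lru unpinned;    /* objects without backing pages */
        struct drm_gem_lru willneed;    /* evictable objects */
        struct drm_gem_lru dontneed;    /* purgeable objects */
};

static void foo_init_lrus(struct foo_device *foo)
{
        mutex_init(&foo->lru_lock);
        drm_gem_lru_init(&foo->unpinned, &foo->lru_lock);
        drm_gem_lru_init(&foo->willneed, &foo->lru_lock);
        drm_gem_lru_init(&foo->dontneed, &foo->lru_lock);
}

/* Changing an object's state is then a single call, e.g.: */
static void foo_gem_mark_purgeable(struct foo_device *foo,
                                   struct drm_gem_object *obj)
{
        drm_gem_lru_move_tail(&foo->dontneed, obj);
}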

v2: lockdep_assert_held() instead of WARN_ON(!mutex_is_locked())
v3: make drm_gem_lru_move_tail_locked() static until there is a user

Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Patchwork: https://patchwork.freedesktop.org/patch/496128/
Link: https://lore.kernel.org/r/20220802155152.1727594-10-robdclark@gmail.com
Author: Rob Clark, 2022-08-02 08:51:42 -07:00
parent da53d8b546
commit e7c2af13f8
2 changed files with 225 additions and 0 deletions

drivers/gpu/drm/drm_gem.c

@@ -165,6 +165,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
        obj->resv = &obj->_resv;
        drm_vma_node_reset(&obj->vma_node);
        INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -951,6 +952,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
        dma_resv_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
        drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1274,3 +1276,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
        lru->lock = lock;
        lru->count = 0;
        INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
        obj->lru->count -= obj->size >> PAGE_SHIFT;
        WARN_ON(obj->lru->count < 0);
        list_del(&obj->lru_node);
        obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
        struct drm_gem_lru *lru = obj->lru;

        if (!lru)
                return;

        mutex_lock(lru->lock);
        drm_gem_lru_remove_locked(obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

static void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        lockdep_assert_held_once(lru->lock);

        if (obj->lru)
                drm_gem_lru_remove_locked(obj);

        lru->count += obj->size >> PAGE_SHIFT;
        list_add_tail(&obj->lru_node, &lru->list);
        obj->lru = lru;
}

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        mutex_lock(lru->lock);
        drm_gem_lru_move_tail_locked(lru, obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or, if necessary, block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
                 bool (*shrink)(struct drm_gem_object *obj))
{
        struct drm_gem_lru still_in_lru;
        struct drm_gem_object *obj;
        unsigned freed = 0;

        drm_gem_lru_init(&still_in_lru, lru->lock);

        mutex_lock(lru->lock);

        while (freed < nr_to_scan) {
                obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

                if (!obj)
                        break;

                drm_gem_lru_move_tail_locked(&still_in_lru, obj);

                /*
                 * If it's in the process of being freed, gem_object->free()
                 * may be blocked on lock waiting to remove it. So just
                 * skip it.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        continue;

                /*
                 * Now that we own a reference, we can drop the lock for the
                 * rest of the loop body, to reduce contention with other
                 * code paths that need the LRU lock.
                 */
                mutex_unlock(lru->lock);

                /*
                 * Note that this still needs to be a trylock, since we can
                 * hit the shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held).
                 */
                if (!dma_resv_trylock(obj->resv))
                        goto tail;

                if (shrink(obj)) {
                        freed += obj->size >> PAGE_SHIFT;

                        /*
                         * If we succeeded in releasing the object's backing
                         * pages, we expect the driver to have moved the object
                         * out of this LRU.
                         */
                        WARN_ON(obj->lru == &still_in_lru);
                        WARN_ON(obj->lru == lru);
                }

                dma_resv_unlock(obj->resv);

tail:
                drm_gem_object_put(obj);
                mutex_lock(lru->lock);
        }

        /*
         * Move objects we've skipped over out of the temporary still_in_lru
         * back into this LRU.
         */
        list_for_each_entry (obj, &still_in_lru.list, lru_node)
                obj->lru = lru;
        list_splice_tail(&still_in_lru.list, &lru->list);
        lru->count += still_in_lru.count;

        mutex_unlock(lru->lock);

        return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
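
For illustration only, a minimal sketch of a shrink callback built on
this contract (foo_* names and the dev_private layout are hypothetical,
not part of this patch): drm_gem_lru_scan() invokes the callback with
the object's dma_resv already trylocked, and on success the callback is
expected to have moved the object into a different LRU.

/* Hypothetical shrink callback; called with obj->resv locked. */
static bool foo_gem_purge(struct drm_gem_object *obj)
{
        struct foo_device *foo = obj->dev->dev_private;  /* assumed layout */

        /* The LRU may contain active buffers, so check for idle first. */
        if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
                return false;

        foo_gem_release_pages(obj);     /* hypothetical: drop backing pages */

        /* Move the object out of the LRU being scanned, as the helper expects. */
        drm_gem_lru_move_tail(&foo->unpinned, obj);

        return true;
}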

include/drm/drm_gem.h

@@ -174,6 +174,41 @@ struct drm_gem_object_funcs {
        const struct vm_operations_struct *vm_ops;
};

/**
 * struct drm_gem_lru - A simple LRU helper
 *
 * A helper for tracking GEM objects in a given state, to aid in
 * driver's shrinker implementation. Tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * &drm_gem_lru_scan for driver's &shrinker.scan_objects
 * implementation.
 */
struct drm_gem_lru {
        /**
         * @lock:
         *
         * Lock protecting movement of GEM objects between LRUs. All
         * LRUs that the object can move between should be protected
         * by the same lock.
         */
        struct mutex *lock;

        /**
         * @count:
         *
         * The total number of backing pages of the GEM objects in
         * this LRU.
         */
        long count;

        /**
         * @list:
         *
         * The LRU list.
         */
        struct list_head list;
};

/**
 * struct drm_gem_object - GEM buffer object
 *
@@ -312,6 +347,20 @@ struct drm_gem_object {
         *
         */
        const struct drm_gem_object_funcs *funcs;

        /**
         * @lru_node:
         *
         * List node in a &drm_gem_lru.
         */
        struct list_head lru_node;

        /**
         * @lru:
         *
         * The current LRU list that the GEM object is on.
         */
        struct drm_gem_lru *lru;
};

/**
@@ -420,4 +469,10 @@ void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset);

void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
                               bool (*shrink)(struct drm_gem_object *obj));

#endif /* __DRM_GEM_H__ */
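
To round this out, a hedged sketch of the corresponding shrinker hooks,
reusing the hypothetical struct foo_device from the earlier sketch (which
embeds its struct shrinker): &shrinker.count_objects can read lru->count
locklessly, and &shrinker.scan_objects is mostly a call into
drm_gem_lru_scan().

/* Hypothetical shrinker hooks for the foo_device sketch above. */
static unsigned long
foo_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct foo_device *foo = container_of(shrinker, struct foo_device, shrinker);
        unsigned long count = foo->dontneed.count;  /* lockless page count */

        return count ? count : SHRINK_EMPTY;
}

static unsigned long
foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct foo_device *foo = container_of(shrinker, struct foo_device, shrinker);
        unsigned long freed;

        /* Walk the purgeable LRU, calling foo_gem_purge() on each object. */
        freed = drm_gem_lru_scan(&foo->dontneed, sc->nr_to_scan, foo_gem_purge);

        return freed ? freed : SHRINK_STOP;
}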