2016-11-11 10:43:54 +00:00
|
|
|
/*
|
2019-05-28 09:29:42 +00:00
|
|
|
* SPDX-License-Identifier: MIT
|
2016-11-11 10:43:54 +00:00
|
|
|
*
|
2019-05-28 09:29:42 +00:00
|
|
|
* Copyright © 2016 Intel Corporation
|
2016-11-11 10:43:54 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __I915_GEM_OBJECT_H__
|
|
|
|
#define __I915_GEM_OBJECT_H__
|
|
|
|
|
|
|
|
#include <drm/drm_gem.h>
|
2019-01-08 08:27:09 +00:00
|
|
|
#include <drm/drm_file.h>
|
|
|
|
#include <drm/drm_device.h>
|
2016-11-11 10:43:54 +00:00
|
|
|
|
2019-12-18 10:40:43 +00:00
|
|
|
#include "display/intel_frontbuffer.h"
|
2019-05-28 09:29:44 +00:00
|
|
|
#include "i915_gem_object_types.h"
|
2019-05-28 09:29:48 +00:00
|
|
|
#include "i915_gem_gtt.h"
|
2020-01-07 13:40:09 +00:00
|
|
|
#include "i915_vma_types.h"
|
2019-05-28 09:29:48 +00:00
|
|
|
|
2021-01-22 18:15:14 +00:00
|
|
|
/*
|
|
|
|
* XXX: There is a prevalence of the assumption that we fit the
|
|
|
|
* object's page count inside a 32bit _signed_ variable. Let's document
|
|
|
|
* this and catch if we ever need to fix it. In the meantime, if you do
|
|
|
|
* spot such a local variable, please consider fixing!
|
|
|
|
*
|
|
|
|
* Aside from our own locals (for which we have no excuse!):
|
|
|
|
* - sg_table embeds unsigned int for num_pages
|
|
|
|
* - get_user_pages*() mixed ints with longs
|
|
|
|
*/
|
|
|
|
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
|
|
|
|
GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
|
|
|
|
|
|
|
|
/*
 * Return true if @size cannot be represented as a GEM object: either the
 * page count exceeds INT_MAX (see comment above) or the byte size does not
 * fit in obj->base.size.
 */
static inline bool i915_gem_object_size_2big(u64 size)
{
	/*
	 * Never dereferenced: only used so overflows_type() can inspect the
	 * declared type of obj->base.size (sizeof-like, unevaluated context).
	 */
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
|
|
|
|
|
2019-05-28 09:29:45 +00:00
|
|
|
void i915_gem_init__objects(struct drm_i915_private *i915);
|
|
|
|
|
2019-02-28 10:20:34 +00:00
|
|
|
struct drm_i915_gem_object *i915_gem_object_alloc(void);
|
|
|
|
void i915_gem_object_free(struct drm_i915_gem_object *obj);
|
|
|
|
|
2019-05-28 09:29:45 +00:00
|
|
|
void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
2019-10-22 14:45:01 +00:00
|
|
|
const struct drm_i915_gem_object_ops *ops,
|
2021-03-23 15:49:56 +00:00
|
|
|
struct lock_class_key *key,
|
|
|
|
unsigned alloc_flags);
|
2019-05-28 09:29:45 +00:00
|
|
|
struct drm_i915_gem_object *
|
2019-10-18 09:07:50 +00:00
|
|
|
i915_gem_object_create_shmem(struct drm_i915_private *i915,
|
|
|
|
resource_size_t size);
|
2019-05-28 09:29:45 +00:00
|
|
|
struct drm_i915_gem_object *
|
|
|
|
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
|
2019-10-18 09:07:50 +00:00
|
|
|
const void *data, resource_size_t size);
|
2019-05-28 09:29:45 +00:00
|
|
|
|
|
|
|
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
|
2019-05-28 09:29:45 +00:00
|
|
|
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
|
|
|
|
struct sg_table *pages,
|
|
|
|
bool needs_clflush);
|
|
|
|
|
2021-03-23 15:49:57 +00:00
|
|
|
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
|
|
|
|
const struct drm_i915_gem_pwrite *args);
|
|
|
|
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
|
|
|
|
const struct drm_i915_gem_pread *args);
|
|
|
|
|
2019-05-28 09:29:46 +00:00
|
|
|
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
|
2021-03-23 15:49:58 +00:00
|
|
|
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
|
|
|
|
struct sg_table *pages);
|
2021-03-23 15:49:57 +00:00
|
|
|
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
|
|
|
|
struct sg_table *pages);
|
|
|
|
|
2019-05-28 09:29:45 +00:00
|
|
|
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
|
|
|
|
|
2019-05-28 09:29:46 +00:00
|
|
|
struct sg_table *
|
|
|
|
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
|
|
|
|
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
/**
|
|
|
|
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
|
|
|
|
* @filp: DRM file private date
|
|
|
|
* @handle: userspace handle
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
*
|
|
|
|
* A pointer to the object named by the handle if such exists on @filp, NULL
|
|
|
|
* otherwise. This object is only valid whilst under the RCU read lock, and
|
|
|
|
* note carefully the object may be in the process of being destroyed.
|
|
|
|
*/
|
|
|
|
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	/* Caller must be inside an RCU read-side critical section. */
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	/* No reference is taken; the result is only stable under the RCU lock. */
	return idr_find(&file->object_idr, handle);
}
|
|
|
|
|
2020-01-30 14:39:31 +00:00
|
|
|
static inline struct drm_i915_gem_object *
|
|
|
|
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
|
|
|
|
{
|
|
|
|
if (obj && !kref_get_unless_zero(&obj->base.refcount))
|
|
|
|
obj = NULL;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
/*
 * Resolve @handle on @file and return the object with a full reference
 * held, or NULL if no such handle exists. Caller must i915_gem_object_put().
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	/* Lookup and ref acquisition must both happen under the RCU lock. */
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
|
|
|
|
|
|
|
|
__deprecated
|
2019-07-12 11:24:25 +00:00
|
|
|
struct drm_gem_object *
|
2016-11-11 10:43:54 +00:00
|
|
|
drm_gem_object_lookup(struct drm_file *file, u32 handle);
|
|
|
|
|
|
|
|
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	/* Take a reference; return the object so calls can be chained. */
	drm_gem_object_get(&obj->base);

	return obj;
}
|
|
|
|
|
|
|
|
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	/* Drop a reference; the object may be freed once the count hits zero. */
	__drm_gem_object_put(&obj->base);
}
|
|
|
|
|
2019-08-11 08:06:32 +00:00
|
|
|
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2021-03-23 15:50:21 +00:00
|
|
|
/*
|
|
|
|
* If more than one potential simultaneous locker, assert held.
|
|
|
|
*/
|
|
|
|
static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 *
	 * Only assert the lock while other references exist: with a zero
	 * refcount nobody else can race us, so no lock is required.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}
|
|
|
|
|
2020-08-19 14:08:45 +00:00
|
|
|
/*
 * Lock the object's reservation, optionally as part of a ww acquire
 * context @ww and optionally interruptibly. Returns 0 on success, or a
 * negative error code; -EDEADLK means the caller must back off and retry
 * via ww->contended.
 */
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	/* Track the lock so the ww context can unlock everything on backoff. */
	if (!ret && ww)
		list_add_tail(&obj->obj_link, &ww->obj_list);
	/* -EALREADY: this ctx already holds the lock — treat as success. */
	if (ret == -EALREADY)
		ret = 0;

	/*
	 * Record the contended object for the caller's backoff/relock cycle.
	 * -EDEADLK only occurs when locking through a ww ctx, so ww != NULL.
	 */
	if (ret == -EDEADLK)
		ww->contended = obj;

	return ret;
}
|
|
|
|
|
2020-08-19 14:08:45 +00:00
|
|
|
static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
|
|
|
|
struct i915_gem_ww_ctx *ww)
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
{
|
2020-08-19 14:08:45 +00:00
|
|
|
return __i915_gem_object_lock(obj, ww, ww && ww->intr);
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:45 +00:00
|
|
|
static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	/* Callers with a ww context should have marked it interruptible. */
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}
|
|
|
|
|
|
|
|
/* Non-blocking lock attempt; returns true if the resv lock was taken. */
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}
|
|
|
|
|
2017-03-01 15:41:28 +00:00
|
|
|
/* Release the object's reservation lock taken via one of the lock helpers. */
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
|
|
|
|
|
2018-07-12 18:53:13 +00:00
|
|
|
/* Mark the object read-only. NOTE(review): no clear helper visible here. */
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}
|
|
|
|
|
|
|
|
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}
|
|
|
|
|
2019-10-08 16:01:15 +00:00
|
|
|
/* True if the object was allocated with physically contiguous backing. */
static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}
|
|
|
|
|
2019-10-08 16:01:16 +00:00
|
|
|
static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}
|
|
|
|
|
|
|
|
static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}
|
|
|
|
|
2021-01-19 21:43:34 +00:00
|
|
|
/* Tiling quirk uses atomic bitops, unlike the plain flag helpers above. */
static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
|
|
|
|
|
|
|
|
static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
|
|
|
|
|
|
|
|
static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
|
|
|
|
|
2019-10-02 12:30:14 +00:00
|
|
|
/* Test the backend (ops) capability flags, not the per-object flags. */
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
/* True if the backing store consists of struct pages (e.g. shmem). */
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}
|
|
|
|
|
2021-01-20 21:38:34 +00:00
|
|
|
static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
|
|
|
|
|
2017-11-14 10:25:13 +00:00
|
|
|
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
|
|
|
|
|
2019-09-28 08:25:46 +00:00
|
|
|
static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
|
|
|
|
|
2017-03-01 15:41:28 +00:00
|
|
|
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	/* Lockless sample; frontbuffer may be set/cleared concurrently. */
	return READ_ONCE(obj->frontbuffer);
}
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
/* Tiling mode and stride are packed into one field; extract the mode. */
static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}
|
|
|
|
|
|
|
|
static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}
|
|
|
|
|
|
|
|
static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}
|
|
|
|
|
2017-01-09 16:16:08 +00:00
|
|
|
static inline unsigned int
|
|
|
|
i915_gem_tile_height(unsigned int tiling)
|
|
|
|
{
|
|
|
|
GEM_BUG_ON(!tiling);
|
|
|
|
return tiling == I915_TILING_Y ? 32 : 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}
|
|
|
|
|
|
|
|
/* Bytes spanned by one full row of tiles: stride * rows-per-tile. */
static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
|
|
|
|
|
2017-01-10 12:10:45 +00:00
|
|
|
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int tiling, unsigned int stride);
|
|
|
|
|
2019-05-28 09:29:44 +00:00
|
|
|
struct scatterlist *
|
2020-10-06 09:25:08 +00:00
|
|
|
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
|
|
|
|
struct i915_gem_object_page_iter *iter,
|
|
|
|
unsigned int n,
|
2021-03-23 15:49:52 +00:00
|
|
|
unsigned int *offset, bool allow_alloc);
|
2020-10-06 09:25:08 +00:00
|
|
|
|
|
|
|
/* Look up the scatterlist entry for page @n via the CPU-page iterator. */
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
}
|
|
|
|
|
|
|
|
/* As i915_gem_object_get_sg(), but walks the DMA-address iterator. */
static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
}
|
2019-05-28 09:29:44 +00:00
|
|
|
|
|
|
|
struct page *
|
|
|
|
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int n);
|
|
|
|
|
2020-09-08 05:40:43 +00:00
|
|
|
struct page *
|
|
|
|
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int n);
|
|
|
|
|
2019-05-28 09:29:44 +00:00
|
|
|
dma_addr_t
|
|
|
|
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned long n,
|
|
|
|
unsigned int *len);
|
|
|
|
|
|
|
|
dma_addr_t
|
|
|
|
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned long n);
|
|
|
|
|
|
|
|
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
|
|
|
|
struct sg_table *pages,
|
|
|
|
unsigned int sg_page_sizes);
|
2019-05-28 09:29:46 +00:00
|
|
|
|
|
|
|
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
|
2019-05-28 09:29:44 +00:00
|
|
|
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
|
|
|
|
|
|
|
|
/*
 * Pin the object's backing pages, acquiring them if necessary. Requires
 * the object lock. Balance with i915_gem_object_unpin_pages().
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	/* Fast path: pages already present, just bump the pin count. */
	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	/* Slow path: acquire the backing pages (takes the first pin). */
	return __i915_gem_object_get_pages(obj);
}
|
|
|
|
|
2021-03-23 15:50:25 +00:00
|
|
|
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
|
|
|
|
|
2019-05-28 09:29:44 +00:00
|
|
|
/* Lockless check that a valid sg_table of backing pages is installed. */
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}
|
|
|
|
|
|
|
|
/* Take an extra pin; pages must already be present (never allocates). */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}
|
|
|
|
|
|
|
|
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}
|
|
|
|
|
|
|
|
/* Drop one pin; pages must exist and be pinned (no release performed). */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}
|
|
|
|
|
|
|
|
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
|
|
|
|
|
drm/i915: Switch obj->mm.lock lockdep annotations on its head
The trouble with having a plain nesting flag for locks which do not
naturally nest (unlike block devices and their partitions, which is
the original motivation for nesting levels) is that lockdep will
never spot a true deadlock if you screw up.
This patch is an attempt at trying better, by highlighting a bit more
of the actual nature of the nesting that's going on. Essentially we
have two kinds of objects:
- objects without pages allocated, which cannot be on any lru and are
hence inaccessible to the shrinker.
- objects which have pages allocated, which are on an lru, and which
the shrinker can decide to throw out.
For the former type of object, memory allocations while holding
obj->mm.lock are permissible. For the latter they are not. And
get/put_pages transitions between the two types of objects.
This is still not entirely fool-proof since the rules might change.
But as long as we run such a code ever at runtime lockdep should be
able to observe the inconsistency and complain (like with any other
lockdep class that we've split up in multiple classes). But there are
a few clear benefits:
- We can drop the nesting flag parameter from
__i915_gem_object_put_pages, because that function by definition is
never going allocate memory, and calling it on an object which
doesn't have its pages allocated would be a bug.
- We strictly catch more bugs, since there's not only one place in the
entire tree which is annotated with the special class. All the
other places that had explicit lockdep nesting annotations we're now
going to leave up to lockdep again.
- Specifically this catches stuff like calling get_pages from
put_pages (which isn't really a good idea, if we can call get_pages
so could the shrinker). I've seen patches do exactly that.
Of course I fully expect CI will show me for the fool I am with this
one here :-)
v2: There can only be one (lockdep only has a cache for the first
subclass, not for deeper ones, and we don't want to make these locks
even slower). Still separate enums for better documentation.
Real fix: don't forget about phys objs and pin_map(), and fix the
shrinker to have the right annotations ... silly me.
v3: Forgot usertptr too ...
v4: Improve comment for pages_pin_count, drop the IMPORTANT comment
and instead prime lockdep (Chris).
v5: Appease checkpatch, no double empty lines (Chris)
v6: More rebasing over selftest changes. Also somehow I forgot to
push this patch :-/
Also format comments consistently while at it.
v7: Fix typo in commit message (Joonas)
Also drop the priming, with the lmem merge we now have allocations
while holding the lmem lock, which wreaks the generic priming I've
done in earlier patches. Should probably be resurrected when lmem is
fixed. See
commit 232a6ebae419193f5b8da4fa869ae5089ab105c2
Author: Matthew Auld <matthew.auld@intel.com>
Date: Tue Oct 8 17:01:14 2019 +0100
drm/i915: introduce intel_memory_region
I'm keeping the priming patch locally so it wont get lost.
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: "Tang, CQ" <cq.tang@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v5)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v6)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191105090148.30269-1-daniel.vetter@ffwll.ch
[mlankhorst: Fix commit typos pointed out by Michael Ruhl]
2019-11-05 09:01:48 +00:00
|
|
|
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
|
2019-05-28 09:29:46 +00:00
|
|
|
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
|
|
|
|
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
|
2019-05-28 09:29:44 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
|
|
|
|
* @obj: the object to map into kernel address space
|
|
|
|
* @type: the type of mapping, used to select pgprot_t
|
|
|
|
*
|
|
|
|
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
|
|
|
|
* pages and then returns a contiguous mapping of the backing storage into
|
|
|
|
* the kernel address space. Based on the @type of mapping, the PTE will be
|
|
|
|
* set to either WriteBack or WriteCombine (via pgprot_t).
|
|
|
|
*
|
|
|
|
* The caller is responsible for calling i915_gem_object_unpin_map() when the
|
|
|
|
* mapping is no longer required.
|
|
|
|
*
|
|
|
|
* Returns the pointer through which to access the mapped object, or an
|
|
|
|
* ERR_PTR() on error.
|
|
|
|
*/
|
|
|
|
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
|
|
|
|
enum i915_map_type type);
|
|
|
|
|
2021-01-28 16:25:40 +00:00
|
|
|
void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
|
|
|
|
enum i915_map_type type);
|
|
|
|
|
2019-05-28 09:29:44 +00:00
|
|
|
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned long offset,
|
|
|
|
unsigned long size);
|
|
|
|
/* Flush the entire kernel mapping of the object (see __..._flush_map). */
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_object_unpin_map - releases an earlier mapping
|
|
|
|
* @obj: the object to unmap
|
|
|
|
*
|
|
|
|
* After pinning the object and mapping its pages, once you are finished
|
|
|
|
* with your access, call i915_gem_object_unpin_map() to release the pin
|
|
|
|
* upon the mapping. Once the pin count reaches zero, that mapping may be
|
|
|
|
* removed.
|
|
|
|
*/
|
|
|
|
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	/* The mapping itself persists; only the page pin is released here. */
	i915_gem_object_unpin_pages(obj);
}
|
|
|
|
|
2020-07-08 17:37:47 +00:00
|
|
|
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
|
|
|
|
|
2019-05-28 09:29:48 +00:00
|
|
|
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int *needs_clflush);
|
|
|
|
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int *needs_clflush);
|
|
|
|
#define CLFLUSH_BEFORE BIT(0)
|
|
|
|
#define CLFLUSH_AFTER BIT(1)
|
|
|
|
#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
|
|
|
|
|
|
|
|
/* Counterpart to i915_gem_object_prepare_read/write(): drop the page pin. */
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
|
|
|
|
|
2016-11-11 10:43:54 +00:00
|
|
|
/*
 * Return the engine processing the most recent write to @obj, or NULL if
 * there is no unsignaled i915 exclusive fence. Best-effort snapshot only.
 */
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	/* Sample the exclusive (write) fence without holding the resv lock. */
	rcu_read_lock();
	fence = dma_resv_get_excl_unlocked(obj->base.resv);
	rcu_read_unlock();

	/* Only an active fence from this driver maps back to an engine. */
	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
|
|
|
|
|
2017-08-11 11:11:16 +00:00
|
|
|
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
|
|
|
|
unsigned int cache_level);
|
2017-02-22 11:40:46 +00:00
|
|
|
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
|
2020-08-19 14:09:03 +00:00
|
|
|
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
|
2017-02-22 11:40:46 +00:00
|
|
|
|
2019-05-28 09:29:48 +00:00
|
|
|
/*
 * Domain management: move @obj so that subsequent access through the
 * named mapping (WC, GTT or CPU) is coherent.  When @write is true the
 * object is also placed exclusively in that write domain.
 */
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);

/*
 * Pin @obj into the GGTT for display scanout with the requested
 * @alignment, @view and pin @flags, under the ww transaction @ww.
 * Returns the pinned vma or an ERR_PTR.
 */
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
|
|
|
|
|
2019-08-02 21:21:36 +00:00
|
|
|
/*
 * Move the object between the shrinker's visibility states:
 * unshrinkable objects are ignored by the shrinker, shrinkable ones may
 * have their pages swapped out, purgeable ones may be discarded outright.
 */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
|
|
|
|
|
2019-05-28 09:29:45 +00:00
|
|
|
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
|
|
|
|
{
|
|
|
|
if (obj->cache_dirty)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
|
|
|
|
return true;
|
|
|
|
|
2019-09-02 04:02:47 +00:00
|
|
|
/* Currently in use by HW (display engine)? Keep flushed. */
|
|
|
|
return i915_gem_object_is_framebuffer(obj);
|
2019-05-28 09:29:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
|
|
|
|
{
|
|
|
|
obj->read_domains = I915_GEM_DOMAIN_CPU;
|
|
|
|
obj->write_domain = I915_GEM_DOMAIN_CPU;
|
|
|
|
if (cpu_write_needs_clflush(obj))
|
|
|
|
obj->cache_dirty = true;
|
|
|
|
}
|
2019-03-31 09:46:20 +00:00
|
|
|
|
2021-01-19 20:44:54 +00:00
|
|
|
/* Apply the scheduling attributes @attr along @fence's chain. */
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

/*
 * Wait for outstanding activity on @obj, as selected by @flags, for up
 * to @timeout (presumably jiffies — confirm at the definition).
 */
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
/* Bump the priority of requests @obj is waiting on, per @attr. */
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);
|
|
|
|
|
2019-12-18 10:40:43 +00:00
|
|
|
/*
 * Frontbuffer-tracking slow paths; only reached when the object actually
 * has a frontbuffer attached (the inline wrappers in this header check
 * obj->frontbuffer first).
 */
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);
|
|
|
|
|
|
|
|
/*
 * i915_gem_object_flush_frontbuffer - notify frontbuffer of a flush
 * @obj: the GEM object
 * @origin: which path (cpu/gtt/etc.) performed the flush
 *
 * Fast path: most objects have no frontbuffer, so only take the slow
 * path when one is attached.
 */
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}
|
|
|
|
|
|
|
|
/*
 * i915_gem_object_invalidate_frontbuffer - notify frontbuffer of a write
 * @obj: the GEM object
 * @origin: which path (cpu/gtt/etc.) is about to write
 *
 * Fast path mirror of i915_gem_object_flush_frontbuffer(): skip the
 * slow path unless a frontbuffer is attached.
 */
static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
|
|
|
|
|
2021-01-20 21:38:34 +00:00
|
|
|
/* Copy @size bytes at byte @offset from the object's backing store into @dst. */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

/* True when the object is backed by shmemfs pages. */
bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
|
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#ifdef CONFIG_MMU_NOTIFIER
/*
 * i915_gem_object_is_userptr - is @obj backed by userptr pages?
 *
 * Userptr objects register an MMU interval notifier against the owning
 * mm; its presence discriminates them from ordinary objects.
 */
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

/*
 * Acquire/validate userptr pages around request submission; see the
 * userptr implementation for the init/done/fini protocol.
 */
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
/*
 * Without MMU notifiers userptr objects cannot exist: the predicate is
 * constant false and the submit hooks must be unreachable (GEM_BUG_ON),
 * reporting -ENODEV if somehow called.
 */
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif
|
|
|
|
|
2019-02-28 10:20:34 +00:00
|
|
|
#endif
|