Merge tag 'tags/topic/i915-ttm-2021-06-11' into drm-misc-next

drm-misc and drm-intel pull request for topic/i915-ttm:

- Convert i915 lmem handling to ttm.
- Add a patch to temporarily add a driver_private member to vma_node.
- Use this to allow mixed object mmap handling for i915.
This commit is contained in: bfd616ff9a
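For context, the mixed mmap handling works by letting TTM-backed objects publish a driver_private pointer on their vma_offset node, while legacy objects keep embedding the node in a struct i915_mmap_offset. A condensed sketch of the lookup this adds to i915_gem_mmap() (reconstructed from the hunks below, not a verbatim quote):

	/* Sketch: tell TTM-backed nodes apart from i915_mmap_offset nodes. */
	if (!node->driver_private) {
		/* Legacy path: node is embedded in a struct i915_mmap_offset. */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	} else {
		/* TTM path: node is embedded directly in the GEM object. */
		obj = i915_gem_object_get_rcu(container_of(node, struct drm_i915_gem_object,
							   base.vma_node));
	}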
@@ -1148,15 +1148,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}

if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
drm_gem_object_put(obj);
return -EINVAL;
}

vma->vm_flags &= ~VM_MAYWRITE;
}

ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
@@ -155,6 +155,7 @@ gem-y += \
gem/i915_gem_stolen.o \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
@@ -11771,7 +11771,7 @@ intel_user_framebuffer_create(struct drm_device *dev,

/* object is backed with LMEM for discrete */
i915 = to_i915(obj->base.dev);
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
return ERR_PTR(-EREMOTE);
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
return -E2BIG;

/*
* For now resort to CPU based clearing for device local-memory, in the
* near future this will use the blitter engine for accelerated, GPU
* based clearing.
* I915_BO_ALLOC_USER will make sure the object is cleared before
* any user access.
*/
flags = 0;
if (mr->type == INTEL_MEMORY_LOCAL)
flags = I915_BO_ALLOC_CPU_CLEAR;
flags = I915_BO_ALLOC_USER;

ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
@@ -4,74 +4,10 @@
*/

#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

static void lmem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}

static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;

flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;

obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);

/* Range manager is always contigous */
if (obj->mm.region->is_range_manager)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}

__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
void __iomem *vaddr =
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);

if (!vaddr) {
struct sg_table *pages =
__i915_gem_object_unset_pages(obj);

if (!IS_ERR_OR_NULL(pages))
lmem_put_pages(obj, pages);
}

memset_io(vaddr, 0, obj->base.size);
io_mapping_unmap(vaddr);
}

return 0;
}

const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,

.get_pages = lmem_get_pages,
.put_pages = lmem_put_pages,
.release = i915_gem_object_release_memory_region,
};

void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

/**
* i915_gem_object_validates_to_lmem - Whether the object is resident in
* lmem when pages are present.
* @obj: The object to check.
*
* Migratable objects residency may change from under us if the object is
* not pinned or locked. This function is intended to be used to check whether
* the object can only reside in lmem when pages are present.
*
* Return: Whether the object is always resident in lmem when pages are
* present.
*/
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

return !i915_gem_object_migratable(obj) &&
mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

/**
* i915_gem_object_is_lmem - Whether the object is resident in
* lmem
* @obj: The object to check.
*
* Even if an object is allowed to migrate and change memory region,
* this function checks whether it will always be present in lmem when
* valid *or* if that's not the case, whether it's currently resident in lmem.
* For migratable and evictable objects, the latter only makes sense when
* the object is locked.
*
* Return: Whether the object migratable but resident in lmem, or not
* migratable and will be present in lmem when valid.
*/
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = obj->mm.region;
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
if (i915_gem_object_migratable(obj) &&
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
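To illustrate the locking rule spelled out in the kerneldoc above, a caller that wants a stable answer for a migratable, evictable object would hold the object lock around the check. A minimal sketch (my own illustration, not part of the patch):

	/* Sketch only: holding the lock satisfies the CONFIG_LOCKDEP assert_object_held(). */
	static bool example_resident_in_lmem(struct drm_i915_gem_object *obj)
	{
		bool lmem;

		i915_gem_object_lock(obj, NULL);
		lmem = i915_gem_object_is_lmem(obj);
		i915_gem_object_unlock(obj);

		return lmem;
	}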
@@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}

int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;

drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);

obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

i915_gem_object_init_memory_region(obj, mem);

return 0;
}
@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);

int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);

#endif /* !__I915_GEM_LMEM_H */
@@ -19,6 +19,7 @@
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
@@ -623,6 +624,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;

GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

mmo = lookup_mmo(obj, mmap_type);
if (mmo)
goto out;
@@ -665,40 +668,47 @@ err:
}

static int
__assign_mmap_offset(struct drm_file *file,
u32 handle,
__assign_mmap_offset(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
u64 *offset)
u64 *offset, struct drm_file *file)
{
struct i915_mmap_offset *mmo;

if (i915_gem_object_never_mmap(obj))
return -ENODEV;

if (obj->ops->mmap_offset) {
*offset = obj->ops->mmap_offset(obj);
return 0;
}

if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return -ENODEV;

mmo = mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo))
return PTR_ERR(mmo);

*offset = drm_vma_node_offset_addr(&mmo->vma_node);
return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
u32 handle,
enum i915_mmap_type mmap_type,
u64 *offset)
{
struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
int err;

obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;

if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
goto out;
}

if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}

mmo = mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo)) {
err = PTR_ERR(mmo);
goto out;
}

*offset = drm_vma_node_offset_addr(&mmo->vma_node);
err = 0;
out:
err = __assign_mmap_offset(obj, mmap_type, offset, file);
i915_gem_object_put(obj);
return err;
}
@@ -718,7 +728,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
else
mmap_type = I915_MMAP_TYPE_GTT;

return __assign_mmap_offset(file, handle, mmap_type, offset);
return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
@@ -786,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}

return __assign_mmap_offset(file, args->handle, type, &args->offset);
return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
@@ -890,8 +900,18 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* destroyed and will be invalid when the vma manager lock
* is released.
*/
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
if (!node->driver_private) {
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);

GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
base.vma_node));

GEM_BUG_ON(obj && !obj->ops->mmap_ops);
}
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
@@ -913,7 +933,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}

vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = mmo;

if (i915_gem_object_has_iomem(obj))
vma->vm_flags |= VM_IO;

/*
* We keep the ref on mmo->obj, not vm_file, but we require
@@ -927,6 +949,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
/* Drop the initial creation reference, the vma is now holding one. */
fput(anon);

if (obj->ops->mmap_ops) {
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
vma->vm_private_data = node->driver_private;
return 0;
}

vma->vm_private_data = mmo;

switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
vma->vm_page_prot =
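From userspace the flow behind __assign_mmap_offset_handle() is unchanged: ask the kernel for a fake offset, then mmap() the DRM fd at that offset. Roughly (illustrative sketch; the uapi struct and ioctl come from include/uapi/drm/i915_drm.h, not from this diff):

	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   drm_fd, arg.offset);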
@@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f
}
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
@@ -208,59 +208,69 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
trace_i915_gem_object_destroy(obj);

if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;

/*
* Note that the vma keeps an object reference while
* it is active, so it *should* not sleep while we
* destroy it. Our debug code errs insits it *might*.
* For the moment, play along.
*/
spin_lock(&obj->vma.lock);
while ((vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);

__i915_vma_put(vma);

spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}

__i915_gem_object_free_mmaps(obj);

GEM_BUG_ON(!list_empty(&obj->lut_list));

atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);

if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);

drm_gem_free_mmap_offset(&obj->base);

if (obj->ops->release)
obj->ops->release(obj);

if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);

if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;

llist_for_each_entry_safe(obj, on, freed, freed) {
trace_i915_gem_object_destroy(obj);

if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;

/*
* Note that the vma keeps an object reference while
* it is active, so it *should* not sleep while we
* destroy it. Our debug code errs insits it *might*.
* For the moment, play along.
*/
spin_lock(&obj->vma.lock);
while ((vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);

__i915_vma_put(vma);

spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
might_sleep();
if (obj->ops->delayed_free) {
obj->ops->delayed_free(obj);
continue;
}

__i915_gem_object_free_mmaps(obj);

GEM_BUG_ON(!list_empty(&obj->lut_list));

atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);

if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);

drm_gem_free_mmap_offset(&obj->base);

if (obj->ops->release)
obj->ops->release(obj);

if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);

if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
__i915_gem_free_object(obj);

/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
@@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/

if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
@@ -410,6 +421,60 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
return 0;
}

/**
* i915_gem_object_evictable - Whether object is likely evictable after unbind.
* @obj: The object to check
*
* This function checks whether the object is likely unvictable after unbind.
* If the object is not locked when checking, the result is only advisory.
* If the object is locked when checking, and the function returns true,
* then an eviction should indeed be possible. But since unlocked vma
* unpinning and unbinding is currently possible, the object can actually
* become evictable even if this function returns false.
*
* Return: true if the object may be evictable. False otherwise.
*/
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int pin_count = atomic_read(&obj->mm.pages_pin_count);

if (!pin_count)
return true;

spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (i915_vma_is_pinned(vma)) {
spin_unlock(&obj->vma.lock);
return false;
}
if (atomic_read(&vma->pages_count))
pin_count--;
}
spin_unlock(&obj->vma.lock);
GEM_WARN_ON(pin_count < 0);

return pin_count == 0;
}

/**
* i915_gem_object_migratable - Whether the object is migratable out of the
* current region.
* @obj: Pointer to the object.
*
* Return: Whether the object is allowed to be resident in other
* regions than the current while pages are present.
*/
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

if (!mr)
return false;

return obj->mm.n_placements > 1;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);

dma_resv_unlock(obj->base.resv);
}

@@ -339,14 +342,14 @@ struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
unsigned int *offset, bool allow_alloc);
unsigned int *offset, bool allow_alloc, bool dma);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset, bool allow_alloc)
{
return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
}

static inline struct scatterlist *
@@ -354,7 +357,7 @@ i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset, bool allow_alloc)
{
return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
}

struct page *
@@ -587,6 +590,16 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
@@ -61,10 +61,26 @@ struct drm_i915_gem_object_ops {
const struct drm_i915_gem_pread *arg);
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
u64 (*mmap_offset)(struct drm_i915_gem_object *obj);

int (*dmabuf_export)(struct drm_i915_gem_object *obj);

/**
* adjust_lru - notify that the madvise value was updated
* @obj: The gem object
*
* The madvise value may have been updated, or object was recently
* referenced so act accordingly (Perhaps changing an LRU list etc).
*/
void (*adjust_lru)(struct drm_i915_gem_object *obj);

/**
* delayed_free - Override the default delayed free implementation
*/
void (*delayed_free)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);

const struct vm_operations_struct *mmap_ops;
const char *name; /* friendly name for debug, e.g. lockdep classes */
};

@@ -187,12 +203,14 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
#define I915_BO_ALLOC_USER BIT(4)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_STRUCT_PAGE | \
I915_BO_ALLOC_CPU_CLEAR)
#define I915_BO_READONLY BIT(4)
#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER)
#define I915_BO_READONLY BIT(5)
#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */

/*
* Is the object to be mapped as read-only to the GPU
@@ -310,6 +328,12 @@ struct drm_i915_gem_object {
bool dirty:1;
} mm;

struct {
struct sg_table *cached_io_st;
struct i915_gem_object_page_iter get_io_page;
bool created:1;
} ttm;

/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
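For readability, the flag layout that results from the hunk above is, as I read it (I915_BO_ALLOC_CONTIGUOUS is assumed to stay at BIT(0); its definition is not shown in this diff):

	#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)	/* assumed, unchanged */
	#define I915_BO_ALLOC_VOLATILE    BIT(1)
	#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
	#define I915_BO_ALLOC_CPU_CLEAR   BIT(3)
	#define I915_BO_ALLOC_USER        BIT(4)	/* new in this patch */
	#define I915_BO_READONLY          BIT(5)	/* moved from BIT(4) */
	#define I915_TILING_QUIRK_BIT     6		/* moved from 5 */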
@@ -467,9 +467,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
unsigned int *offset,
bool allow_alloc)
bool allow_alloc, bool dma)
{
const bool dma = iter == &obj->mm.get_dma_page;
struct scatterlist *sg;
unsigned int idx, count;
@@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,

mutex_lock(&mem->objects.lock);

if (obj->flags & I915_BO_ALLOC_VOLATILE)
list_add(&obj->mm.region_link, &mem->objects.purgeable);
else
list_add(&obj->mm.region_link, &mem->objects.list);

list_add(&obj->mm.region_link, &mem->objects.list);
mutex_unlock(&mem->objects.lock);
}
drivers/gpu/drm/i915/gem/i915_gem_ttm.c (new file, 647 lines)
@@ -0,0 +1,647 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_mman.h"

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2

/**
* struct i915_ttm_tt - TTM page vector with additional private information
* @ttm: The base TTM page vector.
* @dev: The struct device used for dma mapping and unmapping.
* @cached_st: The cached scatter-gather table.
*
* Note that DMA may be going on right up to the point where the page-
* vector is unpopulated in delayed destroy. Hence keep the
* scatter-gather table mapped and cached up to that point. This is
* different from the cached gem object io scatter-gather table which
* doesn't have an associated dma mapping.
*/
struct i915_ttm_tt {
struct ttm_tt ttm;
struct device *dev;
struct sg_table *cached_st;
};

static const struct ttm_place lmem0_sys_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.mem_type = I915_PL_LMEM0,
.flags = 0,
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = I915_PL_SYSTEM,
.flags = 0,
}
};

static struct ttm_placement i915_lmem0_placement = {
.num_placement = 1,
.placement = &lmem0_sys_placement_flags[0],
.num_busy_placement = 1,
.busy_placement = &lmem0_sys_placement_flags[0],
};

static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &lmem0_sys_placement_flags[1],
.num_busy_placement = 1,
.busy_placement = &lmem0_sys_placement_flags[1],
};

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct i915_ttm_tt *i915_tt;
int ret;

i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
if (!i915_tt)
return NULL;

if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
man->use_tt)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;

ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined);
if (ret) {
kfree(i915_tt);
return NULL;
}

i915_tt->dev = obj->base.dev->dev;

return &i915_tt->ttm;
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

if (i915_tt->cached_st) {
dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
DMA_BIDIRECTIONAL, 0);
sg_free_table(i915_tt->cached_st);
kfree(i915_tt->cached_st);
i915_tt->cached_st = NULL;
}
ttm_pool_free(&bdev->pool, ttm);
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

ttm_tt_destroy_common(bdev, ttm);
kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

/* Will do for now. Our pinned objects are still on TTM's LRU lists */
if (!i915_gem_object_evictable(obj))
return false;

/* This isn't valid with a buddy allocator */
return ttm_bo_eviction_valuable(bo, place);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = i915_sys_placement;
}

static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret;

ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret)
return ret;

ret = __i915_gem_object_put_pages(obj);
if (ret)
return ret;

return 0;
}

static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
void __rcu **slot;

if (!obj->ttm.cached_io_st)
return;

rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
rcu_read_unlock();

sg_free_table(obj->ttm.cached_io_st);
kfree(obj->ttm.cached_io_st);
obj->ttm.cached_io_st = NULL;
}

static void i915_ttm_purge(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
};
struct ttm_placement place = {};
int ret;

if (obj->mm.madv == __I915_MADV_PURGED)
return;

/* TTM's purge interface. Note that we might be reentering. */
ret = ttm_bo_validate(bo, &place, &ctx);

if (!ret) {
i915_ttm_free_cached_io_st(obj);
obj->mm.madv = __I915_MADV_PURGED;
}
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret = i915_ttm_move_notify(bo);

GEM_WARN_ON(ret);
GEM_WARN_ON(obj->ttm.cached_io_st);
if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

if (likely(obj)) {
/* This releases all gem object bindings to the backend. */
__i915_gem_free_object(obj);
}
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

/* There's some room for optimization here... */
GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
ttm_mem_type < I915_PL_LMEM0);
if (ttm_mem_type == I915_PL_SYSTEM)
return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
0);

return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
ttm_mem_type - I915_PL_LMEM0);
}

static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
struct scatterlist *sg;
struct sg_table *st;
int ret;

if (i915_tt->cached_st)
return i915_tt->cached_st;

st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return ERR_PTR(-ENOMEM);

sg = __sg_alloc_table_from_pages
(st, ttm->pages, ttm->num_pages, 0,
(unsigned long)ttm->num_pages << PAGE_SHIFT,
i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
if (IS_ERR(sg)) {
kfree(st);
return ERR_CAST(sg);
}

ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
if (ret) {
sg_free_table(st);
kfree(st);
return ERR_PTR(ret);
}

i915_tt->cached_st = st;
return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct ttm_resource *res)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, res->mem_type);

if (man->use_tt)
return i915_ttm_tt_get_st(bo->ttm);

return intel_region_ttm_node_to_st(obj->mm.region, res);
}

static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *dst_mem,
struct ttm_place *hop)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct ttm_resource_manager *dst_man =
ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_resource_manager *src_man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct intel_memory_region *dst_reg, *src_reg;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_iomap io;
} _dst_iter, _src_iter;
struct ttm_kmap_iter *dst_iter, *src_iter;
struct sg_table *dst_st;
int ret;

dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
GEM_BUG_ON(!dst_reg || !src_reg);

/* Sync for now. We could do the actual copy async. */
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;

ret = i915_ttm_move_notify(bo);
if (ret)
return ret;

if (obj->mm.madv != I915_MADV_WILLNEED) {
i915_ttm_purge(obj);
ttm_resource_free(bo, &dst_mem);
return 0;
}

/* Populate ttm with pages if needed. Typically system memory. */
if (bo->ttm && (dst_man->use_tt ||
(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
return ret;
}

dst_st = i915_ttm_resource_get_st(obj, dst_mem);
if (IS_ERR(dst_st))
return PTR_ERR(dst_st);

/* If we start mapping GGTT, we can no longer use man::use_tt here. */
dst_iter = dst_man->use_tt ?
ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
dst_st, dst_reg->region.start);

src_iter = src_man->use_tt ?
ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
obj->ttm.cached_io_st,
src_reg->region.start);

ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
ttm_bo_move_sync_cleanup(bo, dst_mem);
i915_ttm_free_cached_io_st(obj);

if (!dst_man->use_tt) {
obj->ttm.cached_io_st = dst_st;
obj->ttm.get_io_page.sg_pos = dst_st->sgl;
obj->ttm.get_io_page.sg_idx = 0;
}

return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
if (mem->mem_type < I915_PL_LMEM0)
return 0;

mem->bus.caching = ttm_write_combined;
mem->bus.is_iomem = true;

return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
struct scatterlist *sg;
unsigned int ofs;

GEM_WARN_ON(bo->ttm);

sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);

return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
.ttm_tt_create = i915_ttm_tt_create,
.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
.ttm_tt_destroy = i915_ttm_tt_destroy,
.eviction_valuable = i915_ttm_eviction_valuable,
.evict_flags = i915_ttm_evict_flags,
.move = i915_ttm_move,
.swap_notify = i915_ttm_swap_notify,
.delete_mem_notify = i915_ttm_delete_mem_notify,
.io_mem_reserve = i915_ttm_io_mem_reserve,
.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
* i915_ttm_driver - Return a pointer to the TTM device funcs
*
* Return: Pointer to statically allocated TTM device funcs.
*/
struct ttm_device_funcs *i915_ttm_driver(void)
{
return &i915_ttm_bo_driver;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
};
struct sg_table *st;
int ret;

/* Move to the requested placement. */
ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx);
if (ret)
return ret == -ENOSPC ? -ENXIO : ret;

/* Object either has a page vector or is an iomem object */
st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
if (IS_ERR(st))
return PTR_ERR(st);

__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

i915_ttm_adjust_lru(obj);

return ret;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *st)
{
/*
* We're currently not called from a shrinker, so put_pages()
* typically means the object is about to destroyed, or called
* from move_notify(). So just avoid doing much for now.
* If the object is not destroyed next, The TTM eviction logic
* and shrinkers will move it out if needed.
*/

i915_ttm_adjust_lru(obj);
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

/*
* Don't manipulate the TTM LRUs while in TTM bo destruction.
* We're called through i915_ttm_delete_mem_notify().
*/
if (!kref_read(&bo->kref))
return;

/*
* Put on the correct LRU list depending on the MADV status
*/
spin_lock(&bo->bdev->lru_lock);
if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
bo->priority = I915_TTM_PRIO_HAS_PAGES;
} else {
if (bo->priority > I915_TTM_PRIO_NO_PAGES)
bo->priority = I915_TTM_PRIO_NO_PAGES;
}

ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
spin_unlock(&bo->bdev->lru_lock);
}

/*
* TTM-backed gem object destruction requires some clarification.
* Basically we have two possibilities here. We can either rely on the
* i915 delayed destruction and put the TTM object when the object
* is idle. This would be detected by TTM which would bypass the
* TTM delayed destroy handling. The other approach is to put the TTM
* object early and rely on the TTM destroyed handling, and then free
* the leftover parts of the GEM object once TTM's destroyed list handling is
* complete. For now, we rely on the latter for two reasons:
* a) TTM can evict an object even when it's on the delayed destroy list,
* which in theory allows for complete eviction.
* b) There is work going on in TTM to allow freeing an object even when
* it's not idle, and using the TTM destroyed list handling could help us
* benefit from that.
*/
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
if (obj->ttm.created) {
ttm_bo_put(i915_gem_to_ttm(obj));
} else {
__i915_gem_free_object(obj);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(area->vm_private_data);

/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
return VM_FAULT_SIGBUS;

return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
void *buf, int len, int write)
{
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(area->vm_private_data);

if (i915_gem_object_is_readonly(obj) && write)
return -EACCES;

return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);

GEM_BUG_ON(!obj);
i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);

GEM_BUG_ON(!obj);
i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
.fault = vm_fault_ttm,
.access = vm_access_ttm,
.open = ttm_vm_open,
.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

return drm_vma_node_offset_addr(&obj->base.vma_node);
}

const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
.flags = I915_GEM_OBJECT_HAS_IOMEM,

.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_purge,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
.mmap_offset = i915_ttm_mmap_offset,
.mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
if (obj->ttm.created)
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/**
* __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
* @mem: The initial memory region for the object.
* @obj: The gem object.
* @size: Object size in bytes.
* @flags: gem object flags.
*
* Return: 0 on success, negative error code on failure.
*/
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
enum ttm_bo_type bo_type;
size_t alignment = 0;
int ret;

/* Adjust alignment to GPU- and CPU huge page sizes. */

if (mem->is_range_manager) {
if (size >= SZ_1G)
alignment = SZ_1G >> PAGE_SHIFT;
else if (size >= SZ_2M)
alignment = SZ_2M >> PAGE_SHIFT;
else if (size >= SZ_64K)
alignment = SZ_64K >> PAGE_SHIFT;
}

drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
i915_gem_object_init_memory_region(obj, mem);
i915_gem_object_make_unshrinkable(obj);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);

bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
ttm_bo_type_kernel;

/*
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
* Similarly, in delayed_destroy, we can't call ttm_bo_put()
* until successful initialization.
*/
obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
bo_type, &i915_sys_placement, alignment,
true, NULL, NULL, i915_ttm_bo_destroy);

if (!ret)
obj->ttm.created = true;

/* i915 wants -ENXIO when out of memory region space. */
return (ret == -ENOSPC) ? -ENXIO : ret;
}
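Piecing the hunks together, discrete lmem object creation now bottoms out in this new file; roughly (call chain reconstructed by me from the surrounding hunks, not quoted from the patch):

	/*
	 * i915_gem_object_create_lmem(i915, size, flags)
	 *   -> i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM], size, flags)
	 *     -> mem->ops->init_object(mem, obj, size, flags)
	 *        == __i915_gem_ttm_object_init()  (intel_region_lmem_ops.init_object, see the lmem region hunk below)
	 */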
drivers/gpu/drm/i915/gem/i915_gem_ttm.h (new file, 48 lines)
@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_

#include "gem/i915_gem_object_types.h"

/**
* i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a
* struct ttm_buffer_object.
* @obj: Pointer to the gem object.
*
* Return: Pointer to the embedded struct ttm_buffer_object.
*/
static inline struct ttm_buffer_object *
i915_gem_to_ttm(struct drm_i915_gem_object *obj)
{
return &obj->__do_not_access;
}

/*
* i915 ttm gem object destructor. Internal use only.
*/
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);

/**
* i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
* struct drm_i915_gem_object.
*
* Return: Pointer to the embedding struct ttm_buffer_object, or NULL
* if the object was not an i915 ttm object.
*/
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
return NULL;

return container_of(bo, struct drm_i915_gem_object, __do_not_access);
}

int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);
#endif
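A minimal usage sketch for the two converters above (illustration only, not from the patch):

	static void example(struct ttm_buffer_object *bo)
	{
		struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

		if (!obj)	/* not an i915 TTM object: destroy callback mismatch */
			return;

		/* Converting back yields the same embedded TTM object. */
		WARN_ON(i915_gem_to_ttm(obj) != bo);
	}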
@@ -578,16 +578,17 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
int expected)
{
struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
u64 offset;
int ret;

obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return false;
return expected && expected == PTR_ERR(obj);

mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
i915_gem_object_put(obj);

return PTR_ERR_OR_ZERO(mmo) == expected;
return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
@@ -622,8 +623,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
struct drm_mm_node *hole, *next;
struct i915_mmap_offset *mmo;
int loop, err = 0;
u64 offset;

/* Disable background reaper */
disable_retire_worker(i915);
@@ -684,13 +685,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
goto out;
}

mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
if (IS_ERR(mmo)) {
err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
if (err) {
pr_err("Unable to insert object into reclaimed hole\n");
err = PTR_ERR(mmo);
goto err_obj;
}

@@ -865,10 +866,10 @@ static int __igt_mmap(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
struct vm_area_struct *area;
unsigned long addr;
int err, i;
u64 offset;

if (!can_mmap(obj, type))
return 0;
@@ -879,11 +880,11 @@ static int __igt_mmap(struct drm_i915_private *i915,
if (err)
return err;

mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;

addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;

@@ -897,13 +898,6 @@ static int __igt_mmap(struct drm_i915_private *i915,
goto out_unmap;
}

if (area->vm_private_data != mmo) {
pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
obj->mm.region->name);
err = -EINVAL;
goto out_unmap;
}

for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
@@ -961,7 +955,7 @@ static int igt_mmap(void *arg)
struct drm_i915_gem_object *obj;
int err;

obj = i915_gem_object_create_region(mr, sizes[i], 0);
obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;

@@ -1004,12 +998,12 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
unsigned long __user *ptr;
unsigned long A, B;
unsigned long x, y;
unsigned long addr;
int err;
u64 offset;

memset(&A, 0xAA, sizeof(A));
memset(&B, 0xBB, sizeof(B));
@@ -1017,11 +1011,11 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
if (!can_mmap(obj, type) || !can_access(obj))
return 0;

mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;

addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;
ptr = (unsigned long __user *)addr;
@@ -1081,7 +1075,7 @@ static int igt_mmap_access(void *arg)
struct drm_i915_gem_object *obj;
int err;

obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;

@@ -1111,11 +1105,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
enum i915_mmap_type type)
{
struct intel_engine_cs *engine;
struct i915_mmap_offset *mmo;
unsigned long addr;
u32 __user *ux;
u32 bbe;
int err;
u64 offset;

/*
* Verify that the mmap access into the backing store aligns with
@@ -1132,11 +1126,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
if (err)
return err;

mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;

addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;

@@ -1226,7 +1220,7 @@ static int igt_mmap_gpu(void *arg)
struct drm_i915_gem_object *obj;
int err;

obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;

@@ -1303,18 +1297,18 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
unsigned long addr;
int err;
u64 offset;

if (!can_mmap(obj, type))
return 0;

mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;

addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;

@@ -1350,10 +1344,20 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
}
}

err = check_absent(addr, obj->base.size);
if (err) {
pr_err("%s: was not absent\n", obj->mm.region->name);
goto out_unmap;
if (!obj->ops->mmap_ops) {
err = check_absent(addr, obj->base.size);
if (err) {
pr_err("%s: was not absent\n", obj->mm.region->name);
goto out_unmap;
}
} else {
/* ttm allows access to evicted regions by design */

err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
goto out_unmap;
}
}

out_unmap:
@@ -1371,7 +1375,7 @@ static int igt_mmap_revoke(void *arg)
struct drm_i915_gem_object *obj;
int err;

obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
@@ -9,6 +9,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "intel_region_lmem.h"

static int init_fake_lmem_bar(struct intel_memory_region *mem)
@@ -107,7 +108,7 @@ out_no_io:
static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
.init_object = __i915_gem_lmem_object_init,
.init_object = __i915_gem_ttm_object_init,
};

struct intel_memory_region *
@@ -1005,8 +1005,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
}

if (obj->mm.madv != __I915_MADV_PURGED)
if (obj->mm.madv != __I915_MADV_PURGED) {
obj->mm.madv = args->madv;
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);
}

if (i915_gem_object_has_pages(obj)) {
unsigned long flags;
@@ -149,7 +149,6 @@ intel_memory_region_create(struct drm_i915_private *i915,

mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
INIT_LIST_HEAD(&mem->objects.purgeable);
INIT_LIST_HEAD(&mem->reserved);

mutex_init(&mem->mm_lock);
@@ -101,7 +101,6 @@ struct intel_memory_region {
struct {
struct mutex lock; /* Protects access to objects */
struct list_head list;
struct list_head purgeable;
} objects;

size_t chunk_size;
@@ -11,6 +11,7 @@

#include "intel_region_ttm.h"

#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
*
@@ -20,9 +21,6 @@
* i915 GEM regions to TTM memory types and resource managers.
*/

/* A Zero-initialized driver for now. We don't have a TTM backend yet. */
static struct ttm_device_funcs i915_ttm_bo_driver;

/**
* intel_region_ttm_device_init - Initialize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
@@ -33,7 +31,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
struct drm_device *drm = &dev_priv->drm;

return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
drm->dev, drm->anon_inode->i_mapping,
drm->vma_offset_manager, false, false);
}
@@ -177,6 +175,7 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
mem->region.start);
}

#ifdef CONFIG_DRM_I915_SELFTEST
/**
* intel_region_ttm_node_alloc - Allocate memory resources from a region
* @mem: The memory region,
@@ -224,3 +223,4 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem,
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}
#endif
@@ -12,6 +12,7 @@
struct drm_i915_private;
struct intel_memory_region;
struct ttm_resource;
struct ttm_device_funcs;

int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);

@@ -24,11 +25,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem);
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
struct ttm_resource *res);

void intel_region_ttm_node_free(struct intel_memory_region *mem,
struct ttm_resource *node);

struct ttm_device_funcs *i915_ttm_driver(void);

#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags);

void intel_region_ttm_node_free(struct intel_memory_region *mem,
struct ttm_resource *node);
#endif
#endif /* _INTEL_REGION_TTM_H_ */
@@ -9,15 +9,28 @@
#include "i915_drv.h"
#include "igt_mmap.h"

unsigned long igt_mmap_node(struct drm_i915_private *i915,
struct drm_vma_offset_node *node,
unsigned long addr,
unsigned long prot,
unsigned long flags)
unsigned long igt_mmap_offset(struct drm_i915_private *i915,
u64 offset,
unsigned long size,
unsigned long prot,
unsigned long flags)
{
struct drm_vma_offset_node *node;
struct file *file;
unsigned long addr;
int err;

/* no need to refcount, we own this object */
drm_vma_offset_lock_lookup(i915->drm.vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager,
offset / PAGE_SIZE, size / PAGE_SIZE);
drm_vma_offset_unlock_lookup(i915->drm.vma_offset_manager);

if (GEM_WARN_ON(!node)) {
pr_info("Failed to lookup %llx\n", offset);
return -ENOENT;
}

/* Pretend to open("/dev/dri/card0") */
file = mock_drm_getfile(i915->drm.primary, O_RDWR);
if (IS_ERR(file))
@@ -29,7 +42,7 @@ unsigned long igt_mmap_node(struct drm_i915_private *i915,
goto out_file;
}

addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
prot, flags, drm_vma_node_offset_addr(node));

drm_vma_node_revoke(node, file->private_data);
@@ -7,13 +7,15 @@
#ifndef IGT_MMAP_H
#define IGT_MMAP_H

#include <linux/types.h>

struct drm_i915_private;
struct drm_vma_offset_node;

unsigned long igt_mmap_node(struct drm_i915_private *i915,
struct drm_vma_offset_node *node,
unsigned long addr,
unsigned long prot,
unsigned long flags);
unsigned long igt_mmap_offset(struct drm_i915_private *i915,
u64 offset,
unsigned long size,
unsigned long prot,
unsigned long flags);

#endif /* IGT_MMAP_H */
@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
bool readonly:1;
void *driver_private;
};

struct drm_vma_offset_manager {