drm/i915: Move phys objects to its own file

Continuing the decluttering of i915_gem.c, this time the legacy physical object.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-5-chris@chris-wilson.co.uk

parent 8475355f7a
commit f033428db2
@@ -88,6 +88,8 @@ i915-y += $(gt-y)
obj-y += gem/
gem-y += \
	gem/i915_gem_object.o \
	gem/i915_gem_pages.o \
	gem/i915_gem_phys.o \
	gem/i915_gem_shmem.o
i915-y += \
	  $(gem-y) \

@@ -33,11 +33,17 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @filp: DRM file private date
@@ -236,6 +242,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
@@ -291,7 +299,8 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,

@@ -52,6 +52,8 @@ struct drm_i915_gem_object_ops {
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

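The split relies on these per-backend hooks: each backing-store type supplies its own drm_i915_gem_object_ops and the core never touches shmem or DMA details directly. As a rough sketch of how a backend plugs in (the "myback" names are hypothetical; only the field names come from the structure above):

/*
 * Illustrative sketch only -- "myback" is a made-up backend; the fields
 * match struct drm_i915_gem_object_ops as extended by this patch.
 */
static int myback_get_pages(struct drm_i915_gem_object *obj);
static void myback_put_pages(struct drm_i915_gem_object *obj,
			     struct sg_table *pages);
static void myback_truncate(struct drm_i915_gem_object *obj);
static void myback_writeback(struct drm_i915_gem_object *obj);

static const struct drm_i915_gem_object_ops myback_ops = {
	.get_pages = myback_get_pages,   /* invoked via ____i915_gem_object_get_pages() */
	.put_pages = myback_put_pages,   /* invoked via __i915_gem_object_put_pages() */
	.truncate  = myback_truncate,    /* invoked via i915_gem_object_truncate() */
	.writeback = myback_writeback,   /* invoked via i915_gem_object_writeback() */
};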
521	drivers/gpu/drm/i915/gem/i915_gem_pages.c	(new file)
@@ -0,0 +1,521 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	spin_lock(&i915->mm.obj_lock);
	list_add(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
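To make the page-size bookkeeping in __i915_gem_object_set_pages() concrete, here is a small stand-alone illustration of the same mask computation; the numbers are invented, and the fixed loop bound stands in for ilog2(I915_GTT_MAX_PAGE_SIZE) and the device's supported-size mask in the real driver:

/* Stand-alone illustration of the page_sizes.sg calculation above. */
#include <stdio.h>

int main(void)
{
	unsigned long supported = (1ul << 12) | (1ul << 16) | (1ul << 21); /* 4K | 64K | 2M */
	unsigned int phys = 1u << 21;	/* e.g. one contiguous 2M chunk */
	unsigned int sg = 0;
	int i;

	for (i = 0; i <= 21; i++) {	/* emulates for_each_set_bit() up to ilog2(2M) */
		if (!(supported & (1ul << i)))
			continue;
		if (phys & (~0u << i))	/* is any chunk of at least this size present? */
			sg |= 1u << i;
	}

	printf("phys=%#x -> sg=%#x (4K, 64K and 2M all usable)\n", phys, sg);
	return 0;
}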
211	drivers/gpu/drm/i915/gem/i915_gem_phys.c	(new file)
@@ -0,0 +1,211 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "i915_drv.h"
#include "i915_gem_object.h"

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif
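For reference, a caller converts an object to the phys backend roughly the way the mock selftest further below does. This is only a sketch under the same assumptions as that test (a freshly created shmem object, nothing pinned or mapped, struct_mutex held across the call); the helper name is made up and error handling is trimmed:

/* Illustrative sketch mirroring the selftest usage of attach_phys(). */
static int example_attach_phys(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE); /* align > size => -EINVAL */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_object_put(obj);
	return err;
}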
@@ -213,6 +213,65 @@ err_pages:
	return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmapings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmapings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_entry(mapping, i);
		if (!page || xa_is_value(page))
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
@@ -362,6 +421,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
};

80	drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c	(new file)
@@ -0,0 +1,80 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_selftest.h"

#include "selftests/mock_gem_device.h"

static int mock_phys_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/* Create an object and bind it to a contiguous set of physical pages,
	 * i.e. exercise the i915_gem_object_phys API.
	 */

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (!atomic_read(&obj->mm.pages_pin_count)) {
		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
		err = -EINVAL;
		goto out_obj;
	}

	/* Make the object dirty so that put_pages must do copy back the data */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
		       err);
		goto out_obj;
	}

out_obj:
	i915_gem_object_put(obj);
out:
	return err;
}

int i915_gem_phys_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}
@@ -2903,8 +2903,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

@@ -26,7 +26,6 @@
 */

#include <drm/drm_vma_manager.h>
#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
@@ -99,133 +98,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
	return 0;
}

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
@@ -1964,11 +1836,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
@@ -2014,111 +1881,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

/* Immediately discard the backing storage */
void __i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
@@ -2147,252 +1909,6 @@ bool i915_sg_trim(struct sg_table *orig_st)
	return true;
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
@@ -3381,7 +2897,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		__i915_gem_object_truncate(obj);
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);
@@ -4157,232 +3673,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"

@@ -114,65 +114,18 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
	return !i915_gem_object_has_pages(obj);
}

static void __start_writeback(struct drm_i915_gem_object *obj,
			      unsigned int flags)
static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		__i915_gem_object_truncate(obj);
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (!obj->base.filp)
		return;

	if (!(flags & I915_SHRINK_WRITEBACK))
		return;

	/*
	 * Leave mmapings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmapings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_entry(mapping, i);
		if (!page || xa_is_value(page))
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}

/**
@@ -315,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
			mutex_lock_nested(&obj->mm.lock,
					  I915_MM_SHRINKER);
			if (!i915_gem_object_has_pages(obj)) {
				__start_writeback(obj, flags);
				try_to_writeback(obj, flags);
				count += obj->base.size >> PAGE_SHIFT;
			}
			mutex_unlock(&obj->mm.lock);

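For context, the shrinker now funnels through the new object-level helpers instead of open-coding shmem details. A condensed sketch of the resulting call chain (illustrative only; locking and the real fall-through in try_to_writeback() are omitted, and the helper name is made up):

/* Condensed from try_to_writeback() and the helpers added above. */
static void example_reclaim(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->mm.madv == I915_MADV_DONTNEED) {
		i915_gem_object_truncate(obj);	/* -> obj->ops->truncate(), e.g. shmem_truncate() */
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);	/* -> obj->ops->writeback(), e.g. shmem_writeback() */
}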
@@ -49,59 +49,6 @@ out:
	return err;
}

static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509; /* just to be awkward */
@@ -631,7 +578,6 @@ int i915_gem_object_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_object),
		SUBTEST(igt_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

@@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)