e27ab73d17
Currently, we only mark the CPU cache as dirty if we skip a clflush.
This leads to some confusion where we have to ask if the object is in
the write domain or missed a clflush. If we always mark the cache as
dirty, this becomes a much simpler question to answer.
The goal remains to do as few clflushes as required and to do them as
late as possible, in the hope of deferring the work to a kthread and
not blocking the caller (e.g. execbuf, flips).
v2: Always call clflush before GPU execution when the cache_dirty flag
is set. This may cause some extra work on llc systems that migrate dirty
buffers back and forth - but we do try to limit that by only setting
cache_dirty at the end of the gpu sequence.
v3: Always mark the cache as dirty upon a level change, as we need to
invalidate any stale cachelines due to external writes.
Reported-by: Dongwon Kim <dongwon.kim@intel.com>
Fixes: a6a7cc4b7d ("drm/i915: Always flush the dirty CPU cache when pinning the scanout")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dongwon Kim <dongwon.kim@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Tested-by: Dongwon Kim <dongwon.kim@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170615123850.26843-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
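
As context for reviewers, here is a minimal sketch of the rule described in v2/v3 above: whenever cache_dirty is set on an object, clflush it before GPU execution, then invalidate GPU caches unconditionally. This mirrors the per-object check in eb_move_to_gpu() in the file below and is not new logic beyond it.

	/* Sketch of the pre-execution flush rule; see eb_move_to_gpu() below. */
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, 0);	/* may be deferred to a worker */

	/* ...after walking all objects: */
	i915_gem_chipset_flush(eb->i915);			/* flush chipset caches */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE); /* invalidate GPU caches/TLBs */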
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

struct i915_execbuffer {
	struct drm_i915_private *i915;
	struct drm_file *file;
	struct drm_i915_gem_execbuffer2 *args;
	struct drm_i915_gem_exec_object2 *exec;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_vma *batch;
	struct drm_i915_gem_request *request;
	u32 batch_start_offset;
	u32 batch_len;
	unsigned int dispatch_flags;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	bool need_relocs;
	struct list_head vmas;
	struct reloc_cache {
		struct drm_mm_node node;
		unsigned long vaddr;
		unsigned int page;
		bool use_64bit_reloc : 1;
	} reloc_cache;
	int and;
	union {
		struct i915_vma **lut;
		struct hlist_head *buckets;
	};
};

static int eb_create(struct i915_execbuffer *eb)
{
	eb->lut = NULL;
	if (eb->args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned int size = eb->args->buffer_count;
		size *= sizeof(struct i915_vma *);
		eb->lut = kmalloc(size,
				  GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (!eb->lut) {
		unsigned int size = eb->args->buffer_count;
		unsigned int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb->lut = kzalloc(count * sizeof(struct hlist_head),
				  GFP_TEMPORARY);
		if (!eb->lut)
			return -ENOMEM;

		eb->and = count - 1;
	} else {
		eb->and = -eb->args->buffer_count;
	}

	return 0;
}

static inline void
__eb_unreserve_vma(struct i915_vma *vma,
		   const struct drm_i915_gem_exec_object2 *entry)
{
	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);
}

static void
eb_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	__eb_unreserve_vma(vma, entry);
	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void
eb_reset(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		eb_unreserve_vma(vma);
		i915_vma_put(vma);
		vma->exec_entry = NULL;
	}

	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct i915_execbuffer *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

static int
eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&eb->vmas);

	INIT_LIST_HEAD(&objects);
	spin_lock(&eb->file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < eb->args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&eb->file->object_idr, eb->exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  eb->exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, eb->exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&eb->file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_link, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &eb->exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			u32 handle =
				eb->args->flags & I915_EXEC_HANDLE_LUT ?
				i : eb->exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transfered to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_destroy(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		if (!vma->exec_entry)
			continue;

		__eb_unreserve_vma(vma, vma->exec_entry);
		vma->exec_entry = NULL;
		i915_vma_put(vma);
	}

	i915_gem_context_put(eb->ctx);

	if (eb->buckets)
		kfree(eb->buckets);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(to_i915(obj->base.dev)) ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}

static int
eb_relocate_entry(struct drm_i915_gem_object *obj,
		  struct i915_execbuffer *eb,
		  struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we've already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(eb->i915) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	ret = relocate_entry(obj, reloc, &eb->reloc_cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

static int eb_relocate_vma(struct i915_vma *vma, struct i915_execbuffer *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
		remain -= count;

		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a case
		 * we, the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = eb_relocate_entry(vma->obj, eb, r);
			if (ret)
				goto out;

			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset or will not
					 * change the execobject.offset, on the
					 * call we may not rewrite the value
					 * inside the object, leaving it
					 * dangling and causing a GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_reset(&eb->reloc_cache);
	return ret;
#undef N_RELOC
}

static int
eb_relocate_vma_slow(struct i915_vma *vma,
		     struct i915_execbuffer *eb,
		     struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret = 0;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = eb_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			break;
	}
	reloc_cache_reset(&eb->reloc_cache);
	return ret;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		ret = eb_relocate_vma(vma, eb);
		if (ret)
			break;
	}

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
eb_reserve_vma(struct i915_vma *vma,
	       struct intel_engine_cs *engine,
	       bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const bool has_fenced_gpu_access = INTEL_GEN(eb->i915) < 4;
	const bool needs_unfenced_map = INTEL_INFO(eb->i915)->unfenced_needs_alignment;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	int retry;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(&eb->vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (eb->ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
			 needs_unfenced_map) &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_link, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_link, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_link, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, &eb->vmas);
	list_splice(&pinned_vmas, &eb->vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, &eb->vmas, exec_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, &eb->vmas, exec_link) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, &eb->vmas, exec_link)
			eb_unreserve_vma(vma);

		ret = i915_gem_evict_vm(eb->vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
eb_relocate_slow(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->args->buffer_count;
	struct drm_device *dev = &eb->i915->drm;
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_vma *vma;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	eb_reset(eb);
	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += eb->exec[i].relocation_count;

	reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
	reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
	if (reloc == NULL || reloc_offset == NULL) {
		kvfree(reloc);
		kvfree(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   eb->exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < eb->exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += eb->exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	ret = eb_lookup_vmas(eb);
	if (ret)
		goto err;

	ret = eb_reserve(eb);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		int idx = vma->exec_entry - eb->exec;

		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	kvfree(reloc);
	kvfree(reloc_offset);
	return ret;
}

static int
eb_move_to_gpu(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = vma;
			eb->request->capture_list = capture;
		}

		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

		if (obj->cache_dirty)
			i915_gem_clflush_object(obj, 0);

		ret = i915_gem_request_await_object
			(eb->request, obj, obj->base.pending_write_domain);
		if (ret)
			return ret;
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;
		}

		/* From drm_mm perspective address space is continuous,
		 * so from this point we're always using non-canonical
		 * form internally.
		 */
		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_pages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	unsigned int ctx_id = i915_execbuffer2_get_context_id(*eb->args);
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, ctx_id);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);

	if (unlikely(i915_gem_context_is_banned(ctx))) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return -EIO;
	}

	eb->ctx = i915_gem_context_get(ctx);
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	return 0;
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->base.write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		obj->base.read_domains = 0;
	}
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

static void
eb_move_to_active(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, eb->request, vma->exec_entry->flags);
		eb_export_fence(obj, eb->request, vma->exec_entry->flags);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 3);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < 4; i++) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}

	intel_ring_advance(req, cs);

	return 0;
}

static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	vma->exec_entry =
		memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_link, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req,
	      struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

static int
execbuf_submit(struct i915_execbuffer *eb)
{
	int ret;

	ret = eb_move_to_gpu(eb);
	if (ret)
		return ret;

	ret = i915_switch_context(eb->request);
	if (ret)
		return ret;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(eb->request);
		if (ret)
			return ret;
	}

	ret = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->dispatch_flags);
	if (ret)
		return ret;

	eb_move_to_active(eb);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int ret;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	eb.exec = exec;
	eb.need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.dispatch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  eb.engine->name);
			return -EINVAL;
		}

		eb.dispatch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_in_fence;
		}
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ret = eb_select_context(&eb);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		goto pre_mutex_err;
	}

	if (eb_create(&eb)) {
		i915_gem_context_put(eb.ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(&eb);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	eb.batch = eb_get_batch(&eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = eb_reserve(&eb);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (eb.need_relocs)
		ret = eb_relocate(&eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = eb_relocate_slow(&eb);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (eb.batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

	if (eb.engine->needs_cmd_parser && eb.batch_len) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.dispatch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	eb.batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (eb.dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = eb.batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		eb.batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		ret = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		ret = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.dispatch_flags);
	ret = execbuf_submit(&eb);
err_request:
	__i915_add_request(eb.request, ret == 0);
	add_to_client(eb.request, file);

	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (eb.dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err:
	/* the request owns the ref now */
	eb_destroy(&eb);
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(eb.i915);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
	exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kvmalloc_array(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kvfree(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	kvfree(exec2_list);
	return ret;
}