/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
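
/*
 * Internal bookkeeping flags kept in the otherwise unused high bits of the
 * exec object flags; the execbuffer code uses them to remember which pins
 * and fences it has taken so they can be released again on cleanup.
 */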
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
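
/*
 * Per-execbuffer lookup state: the list of vmas taking part in this call,
 * plus either a flat handle-indexed lookup table (when userspace passed
 * I915_EXEC_HANDLE_LUT) or a small hash table. 'and' holds the hash mask,
 * or the negated buffer count when the flat table is in use.
 */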
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
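
/*
 * Allocate the per-call eb_vmas bookkeeping: try a flat lookup table first
 * when userspace promises unique handles via I915_EXEC_HANDLE_LUT, and fall
 * back to a hash table sized from the buffer count (capped at half a page
 * of hlist heads).
 */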
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
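
/*
 * Forget all previous handle lookups; only the hash table variant
 * (eb->and >= 0) has buckets that need clearing.
 */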
static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
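
/*
 * Resolve every exec object handle into a vma in the target address space.
 * This is done in two passes: first each GEM object is looked up and
 * referenced under file->table_lock, then the lock is dropped so the vmas
 * can be looked up or created without needing GFP_ATOMIC.
 */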
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);
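
	/*
	 * Second pass: file->table_lock is no longer held, so looking up or
	 * creating each vma below can allocate normally.
	 */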
	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;
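
		/*
		 * EXEC_OBJECT_NEEDS_GTT asks for a global GTT binding, which
		 * is not honoured for a full-PPGTT address space; reject the
		 * combination.
		 */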
		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto err;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
|
|
|
|
|
2013-08-14 12:14:04 +00:00
|
|
|
/*
|
|
|
|
* NOTE: We can leak any vmas created here when something fails
|
|
|
|
* later on. But that's no issue since vma_unbind can deal with
|
|
|
|
* vmas which are not actually bound. And since only
|
|
|
|
* lookup_or_create exists as an interface to get at the vma
|
|
|
|
* from the (obj, vm) we don't run the risk of creating
|
|
|
|
* duplicated vmas for the same vm.
|
|
|
|
*/
|
drm/i915: Create bind/unbind abstraction for VMAs
To sum up what goes on here, we abstract the vma binding, similarly to
the previous object binding. This helps distinguish legacy binding from
modern binding. To keep the code churn as minimal as possible, I am
leaving in insert_entries(); it basically serves as the per-platform PTE
writer. bind_vma and insert_entries share a lot of similarities, and I
did have designs to combine the two, but as mentioned already... too
much churn in an already massive patchset.
What follows are the 3 commits which existed discretely in the original
submissions. Upon rebasing on Broadwell support, it became clear that
the separation was not good, and only made for more error-prone code.
Below are the 3 commit messages with all their history.
drm/i915: Add bind/unbind object functions to VMA
drm/i915: Use the new vm [un]bind functions
drm/i915: reduce vm->insert_entries() usage
drm/i915: Add bind/unbind object functions to VMA
As we plumb the code with more VM information, it has become more
obvious that the easiest way to deal with bind and unbind is to simply
put the function pointers in the vm, and let those choose the correct
way to handle the page table updates. This change allows many places in
the code to simply be vm->bind, and not have to worry about
distinguishing PPGTT vs GGTT.
Notice that this patch has no impact on functionality. I've decided to
save the actual change until the next patch because I think it's easier
to review that way. I'm happy to squash the two, or let Daniel do it on
merge.
v2:
Make ggtt handle the quirky aliasing ppgtt
Add flags to bind object to support the above
Don't ever call bind/unbind directly for PPGTT until we have real, full
PPGTT (use NULLs to assert this)
Make sure we rebind the ggtt if there already is a ggtt binding; this
happens on set cache levels.
Use VMA for bind/unbind (Daniel, Ben)
v3: Reorganize ggtt_vma_bind to be more concise and easier to read
(Ville). Change logic in unbind to only unbind ggtt when there is a
global mapping, and to remove a redundant check if the aliasing ppgtt
exists.
v4: Make the bind function a bit smarter about the cache levels to avoid
unnecessary multiple remaps. "I accept it is a wart, I think unifying
the pin_vma / bind_vma could be unified later" (Chris)
Removed the git notes, and put version info here. (Daniel)
v5: Update the comment to not suck (Chris)
v6:
Move bind/unbind to the VMA. It makes more sense in the VMA structure
(always has, but I was previously lazy). With this change, it will allow
us to keep a distinct insert_entries.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
drm/i915: Use the new vm [un]bind functions
Building on the last patch which created the new function pointers in
the VM for bind/unbind, here we actually put those new function pointers
to use.
Split out as a separate patch to aid in review. I'm fine with squashing
into the previous patch if people request it.
v2: Updated to address the smart ggtt which can do aliasing as needed
Make sure we bind to the global gtt when mappable and fenceable. I
thought we could get away without this initially, but we cannot.
v3: Make the global GTT binding explicitly use the ggtt VM for
bind_vma(). While at it, use the new ggtt_vma helper (Chris)
At this point the original mailing list thread diverges, i.e.
v4^:
use target_obj instead of obj for gen6 relocate_entry
vma->bind_vma() can be called safely during pin, so simply do that
instead of the complicated conditionals.
Don't restore PPGTT-bound objects on the resume path
Bug fix in the resume path for globally bound BOs
Properly handle secure dispatch
Rebased on the vma bind/unbind conversion
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
drm/i915: reduce vm->insert_entries() usage
FKA: drm/i915: eliminate vm->insert_entries()
With bind/unbind function pointers in place, we no longer need
insert_entries. We could, and want to, remove clear_range as well;
however, that's not totally easy at this point, since it's still used in
a couple of places that don't only deal in objects: setup, ppgtt init,
and restore gtt mappings.
v2: Don't actually remove insert_entries, just limit its usage. It will
be useful when we introduce gen8, and it will always be called from the
vma bind/unbind.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v1)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
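
A minimal sketch of the abstraction the message above describes, assuming
hypothetical example_* names rather than the driver's real structures: the
vma carries bind/unbind function pointers chosen when the vma is created,
so a caller such as the execbuf code dispatches through the pointer instead
of branching on GGTT versus (aliasing or full) PPGTT.

/* Illustrative sketch only; not part of the file that follows. */
struct example_address_space;

struct example_vma {
        struct example_address_space *vm;       /* the VM this vma lives in */
        /* Chosen at vma creation time based on the backing VM. */
        void (*bind_vma)(struct example_vma *vma,
                         unsigned int cache_level,
                         unsigned int flags);
        void (*unbind_vma)(struct example_vma *vma);
};

/* Callers simply dispatch through the pointer, much as the code below does
 * with vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND). */
static inline void example_bind(struct example_vma *vma,
                                unsigned int cache_level,
                                unsigned int flags)
{
        vma->bind_vma(vma, cache_level, flags);
}
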
                vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
                        goto err;
                }

                /* Transfer ownership from the objects list to the vmas list. */
                list_add_tail(&vma->exec_list, &eb->vmas);
                list_del_init(&obj->obj_exec_link);

                vma->exec_entry = &exec[i];
                if (eb->and < 0) {
                        eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
                        vma->exec_handle = handle;
                        hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
                ++i;
        }

        return 0;

err:
        while (!list_empty(&objects)) {
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
                drm_gem_object_unreference(&obj->base);
        }
        /*
         * Objects already transferred to the vmas list will be unreferenced by
         * eb_destroy.
         */

        return ret;
}

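/*
 * Map an execbuffer handle back to its vma: either a direct lookup table
 * indexed by handle (eb->and < 0, with -eb->and entries) or a hash table
 * bucketed by the low bits of the handle (eb->and is the bucket mask).
 */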
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
        if (eb->and < 0) {
                if (handle >= -eb->and)
                        return NULL;
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
                struct hlist_node *node;

                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
                        struct i915_vma *vma;

                        vma = hlist_entry(node, struct i915_vma, exec_node);
                        if (vma->exec_handle == handle)
                                return vma;
                }
                return NULL;
        }
}

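/*
 * Drop the pin and fence reservations recorded in this vma's exec entry
 * flags, leaving the vma itself bound.
 */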
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry;
        struct drm_i915_gem_object *obj = vma->obj;

        if (!drm_mm_node_allocated(&vma->node))
                return;

        entry = vma->exec_entry;

        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);

        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;

        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;

                vma = list_first_entry(&eb->vmas,
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
}

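/*
 * Decide whether a relocation should be written through the CPU (kmap)
 * path rather than through the GTT aperture: LLC-coherent platforms,
 * objects already in the CPU write domain, objects that cannot be mapped
 * and fenced through the aperture, and non-uncached objects all take the
 * CPU path.
 */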
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
}

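/*
 * Write a relocation value through a CPU mapping of the object's backing
 * page. On gen8+ relocations are 64 bits wide, so the upper dword is
 * written as zero too, remapping if it falls on the following page.
 */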
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        char *vaddr;
        int ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;

        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                                reloc->offset >> PAGE_SHIFT));
        *(uint32_t *)(vaddr + page_offset) = reloc->delta;

        if (INTEL_INFO(dev)->gen >= 8) {
                page_offset = offset_in_page(page_offset + sizeof(uint32_t));

                if (page_offset == 0) {
                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
                }

                *(uint32_t *)(vaddr + page_offset) = 0;
        }

        kunmap_atomic(vaddr);

        return 0;
}

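/*
 * Write a relocation value through an atomic write-combined mapping of the
 * GGTT aperture. As in the CPU path, gen8+ also needs the upper dword of
 * the 64-bit relocation written (as zero), remapping when that dword lands
 * on the following page.
 */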
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
        int ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                return ret;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                return ret;

        /* Map the page containing the relocation we're going to perform. */
        reloc->offset += i915_gem_obj_ggtt_offset(obj);
        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                              reloc->offset & PAGE_MASK);
        reloc_entry = (uint32_t __iomem *)
                (reloc_page + offset_in_page(reloc->offset));
        iowrite32(reloc->delta, reloc_entry);

        if (INTEL_INFO(dev)->gen >= 8) {
                reloc_entry += 1;

                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
                        reloc_page = io_mapping_map_atomic_wc(
                                        dev_priv->gtt.mappable,
                                        reloc->offset + sizeof(uint32_t));
                        reloc_entry = reloc_page;
                }

                iowrite32(0, reloc_entry);
        }

        io_mapping_unmap_atomic(reloc_page);

        return 0;
}

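/*
 * Apply a single relocation: look up the target vma by handle, sanity
 * check the requested domains and the relocation offset, skip the write
 * entirely when the presumed offset already matches, and otherwise patch
 * the value in via the CPU or GTT path before updating the user's
 * presumed_offset.
 */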
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
        struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret;

        /* we already hold a reference to all valid objects */
        target_vma = eb_get_vma(eb, reloc->target_handle);
        if (unlikely(target_vma == NULL))
                return -ENOENT;
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;

        target_offset = target_vma->node.start;

        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
                struct i915_vma *vma =
                        list_first_entry(&target_i915_obj->vma_list,
                                         typeof(*vma), vma_link);
|
|
|
vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
|
2012-07-31 22:35:01 +00:00
|
|
|
}
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* Validate that the target is in a valid r/w GPU domain */
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
|
2012-01-31 20:08:14 +00:00
|
|
|
DRM_DEBUG("reloc with multiple write domains: "
|
2010-11-25 18:00:26 +00:00
|
|
|
"obj %p target %d offset %d "
|
|
|
|
"read %08x write %08x",
|
|
|
|
obj, reloc->target_handle,
|
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2011-12-14 12:57:27 +00:00
|
|
|
if (unlikely((reloc->write_domain | reloc->read_domains)
|
|
|
|
& ~I915_GEM_GPU_DOMAINS)) {
|
2012-01-31 20:08:14 +00:00
|
|
|
DRM_DEBUG("reloc with read/write non-GPU domains: "
|
2010-11-25 18:00:26 +00:00
|
|
|
"obj %p target %d offset %d "
|
|
|
|
"read %08x write %08x",
|
|
|
|
obj, reloc->target_handle,
|
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
target_obj->pending_read_domains |= reloc->read_domains;
|
|
|
|
target_obj->pending_write_domain |= reloc->write_domain;
|
|
|
|
|
|
|
|
/* If the relocation already has the right value in it, no
|
|
|
|
* more work needs to be done.
|
|
|
|
*/
|
|
|
|
if (target_offset == reloc->presumed_offset)
|
2010-12-08 10:38:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
|
|
|
/* Check that the relocation address is valid... */
|
2013-11-03 04:07:11 +00:00
|
|
|
if (unlikely(reloc->offset >
|
|
|
|
obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
|
2012-01-31 20:08:14 +00:00
|
|
|
DRM_DEBUG("Relocation beyond object bounds: "
|
2010-11-25 18:00:26 +00:00
|
|
|
"obj %p target %d offset %d size %d.\n",
|
|
|
|
obj, reloc->target_handle,
|
|
|
|
(int) reloc->offset,
|
|
|
|
(int) obj->base.size);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->offset & 3)) {
|
2012-01-31 20:08:14 +00:00
|
|
|
DRM_DEBUG("Relocation not 4-byte aligned: "
|
2010-11-25 18:00:26 +00:00
|
|
|
"obj %p target %d offset %d.\n",
|
|
|
|
obj, reloc->target_handle,
|
|
|
|
(int) reloc->offset);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2012-03-26 08:10:27 +00:00
|
|
|
/* We can't wait for rendering with pagefaults disabled */
|
|
|
|
if (obj->active && in_atomic())
|
|
|
|
return -EFAULT;
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
reloc->delta += target_offset;
|
2013-08-21 16:10:51 +00:00
|
|
|
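/* Write the new relocation value either via the CPU (kmap) path or via a
 * GTT mapping of the affected page, as selected by use_cpu_reloc() for
 * this object. */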
if (use_cpu_reloc(obj))
|
|
|
|
ret = relocate_entry_cpu(obj, reloc);
|
|
|
|
else
|
|
|
|
ret = relocate_entry_gtt(obj, reloc);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2013-09-02 18:56:23 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* and update the user's relocation entry */
|
|
|
|
reloc->presumed_offset = target_offset;
|
|
|
|
|
2010-12-08 10:38:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
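/*
 * Fast-path relocation for a single vma: copy the user relocation entries
 * onto the stack in chunks with __copy_from_user_inatomic() (the caller has
 * pagefaults disabled), apply each one via
 * i915_gem_execbuffer_relocate_entry(), and write any updated
 * presumed_offset back to userspace.
 */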
static int
|
2013-08-14 09:38:36 +00:00
|
|
|
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
|
|
|
|
struct eb_vmas *eb)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2012-03-24 20:12:53 +00:00
|
|
|
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
|
|
|
|
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
|
2010-11-25 18:00:26 +00:00
|
|
|
struct drm_i915_gem_relocation_entry __user *user_relocs;
|
2013-08-14 09:38:36 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
2012-03-24 20:12:53 +00:00
|
|
|
int remain, ret;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2013-02-22 14:12:51 +00:00
|
|
|
user_relocs = to_user_ptr(entry->relocs_ptr);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2012-03-24 20:12:53 +00:00
|
|
|
remain = entry->relocation_count;
|
|
|
|
while (remain) {
|
|
|
|
struct drm_i915_gem_relocation_entry *r = stack_reloc;
|
|
|
|
int count = remain;
|
|
|
|
if (count > ARRAY_SIZE(stack_reloc))
|
|
|
|
count = ARRAY_SIZE(stack_reloc);
|
|
|
|
remain -= count;
|
|
|
|
|
|
|
|
if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
|
2010-11-25 18:00:26 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2012-03-24 20:12:53 +00:00
|
|
|
do {
|
|
|
|
u64 offset = r->presumed_offset;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2013-12-06 22:10:57 +00:00
|
|
|
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
|
2012-03-24 20:12:53 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (r->presumed_offset != offset &&
|
|
|
|
__copy_to_user_inatomic(&user_relocs->presumed_offset,
|
|
|
|
&r->presumed_offset,
|
|
|
|
sizeof(r->presumed_offset))) {
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
user_relocs++;
|
|
|
|
r++;
|
|
|
|
} while (--count);
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2012-03-24 20:12:53 +00:00
|
|
|
#undef N_RELOC
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
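/*
 * Slow-path relocation for a single vma: the relocation entries have already
 * been copied into kernel memory by the caller, so no atomic user access is
 * needed here.
 */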
static int
|
2013-08-14 09:38:36 +00:00
|
|
|
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
|
|
|
|
struct eb_vmas *eb,
|
|
|
|
struct drm_i915_gem_relocation_entry *relocs)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2013-08-14 09:38:36 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
2010-11-25 18:00:26 +00:00
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
for (i = 0; i < entry->relocation_count; i++) {
|
2013-12-06 22:10:57 +00:00
|
|
|
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
|
2010-11-25 18:00:26 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
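/*
 * Apply the relocations for every vma in the execbuf while pagefaults are
 * disabled (see the comment below). The first failure aborts the walk and is
 * returned to the caller, which is then free to retry via a slower path that
 * copies the relocations in up front.
 */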
static int
|
2013-11-25 17:54:38 +00:00
|
|
|
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2013-08-14 09:38:36 +00:00
|
|
|
struct i915_vma *vma;
|
2011-03-14 15:11:24 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* This is the fast path and we cannot handle a pagefault whilst
|
|
|
|
* holding the struct mutex lest the user pass in the relocations
|
|
|
|
* contained within a mmaped bo. For in such a case, the page
|
|
|
|
* fault handler would call i915_gem_fault() and we would try to
|
|
|
|
* acquire the struct mutex again. Obviously this is bad and so
|
|
|
|
* lockdep complains vehemently.
|
|
|
|
*/
|
|
|
|
pagefault_disable();
|
2013-08-14 09:38:36 +00:00
|
|
|
list_for_each_entry(vma, &eb->vmas, exec_list) {
|
|
|
|
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
|
2010-11-25 18:00:26 +00:00
|
|
|
if (ret)
|
2011-03-14 15:11:24 +00:00
|
|
|
break;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2011-03-14 15:11:24 +00:00
|
|
|
pagefault_enable();
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2011-03-14 15:11:24 +00:00
|
|
|
return ret;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2012-03-26 08:10:27 +00:00
|
|
|
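/*
 * GTT-based relocations require the object to be reachable through the
 * mappable aperture, so this only returns true for a vma that has
 * relocations, cannot use the CPU relocation path and lives in the
 * global GTT.
 */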
static int
|
2013-08-14 09:38:36 +00:00
|
|
|
need_reloc_mappable(struct i915_vma *vma)
|
2012-03-26 08:10:27 +00:00
|
|
|
{
|
2013-08-14 09:38:36 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
|
|
|
return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
|
|
|
|
i915_is_ggtt(vma->vm);
|
2012-03-26 08:10:27 +00:00
|
|
|
}
|
|
|
|
|
2011-12-14 12:57:08 +00:00
|
|
|
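/*
 * Pin one vma for execbuf: request a mappable placement when a fence or a
 * GTT relocation demands it, pin a fence register if the object asked for
 * one, record whether the object moved (which forces a relocation pass),
 * and finally call vma->bind_vma() to write the PTEs, adding a global GTT
 * binding when EXEC_OBJECT_NEEDS_GTT is set and none exists yet.
 */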
static int
|
2013-08-14 09:38:36 +00:00
|
|
|
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
|
|
|
|
struct intel_ring_buffer *ring,
|
|
|
|
bool *need_reloc)
|
2011-12-14 12:57:08 +00:00
|
|
|
{
|
2013-12-06 22:10:56 +00:00
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
2013-08-14 09:38:36 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
2011-12-14 12:57:08 +00:00
|
|
|
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
|
|
|
|
bool need_fence, need_mappable;
|
2013-12-06 22:10:56 +00:00
|
|
|
u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
|
|
|
|
!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
|
2011-12-14 12:57:08 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
need_fence =
|
|
|
|
has_fenced_gpu_access &&
|
|
|
|
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
|
|
|
obj->tiling_mode != I915_TILING_NONE;
|
2013-08-14 09:38:36 +00:00
|
|
|
need_mappable = need_fence || need_reloc_mappable(vma);
|
2011-12-14 12:57:08 +00:00
|
|
|
|
2013-08-14 09:38:36 +00:00
|
|
|
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
|
2013-08-01 00:00:02 +00:00
|
|
|
false);
|
2011-12-14 12:57:08 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2012-08-24 18:18:18 +00:00
|
|
|
entry->flags |= __EXEC_OBJECT_HAS_PIN;
|
|
|
|
|
2011-12-14 12:57:08 +00:00
|
|
|
if (has_fenced_gpu_access) {
|
|
|
|
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
|
2012-04-17 14:31:24 +00:00
|
|
|
ret = i915_gem_object_get_fence(obj);
|
2012-03-22 15:10:00 +00:00
|
|
|
if (ret)
|
2012-08-24 18:18:18 +00:00
|
|
|
return ret;
|
2011-12-14 12:57:08 +00:00
|
|
|
|
2012-03-22 15:10:00 +00:00
|
|
|
if (i915_gem_object_pin_fence(obj))
|
2011-12-14 12:57:08 +00:00
|
|
|
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
|
2012-03-22 15:10:00 +00:00
|
|
|
|
2012-03-21 10:48:18 +00:00
|
|
|
obj->pending_fenced_gpu_access = true;
|
2011-12-14 12:57:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-14 09:38:36 +00:00
|
|
|
if (entry->offset != vma->node.start) {
|
|
|
|
entry->offset = vma->node.start;
|
2013-01-17 21:23:36 +00:00
|
|
|
*need_reloc = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (entry->flags & EXEC_OBJECT_WRITE) {
|
|
|
|
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
|
|
|
|
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
|
|
|
|
}
|
|
|
|
|
2013-12-06 22:10:56 +00:00
|
|
|
vma->bind_vma(vma, obj->cache_level, flags);
|
2013-01-17 21:23:36 +00:00
|
|
|
|
2011-12-14 12:57:08 +00:00
|
|
|
return 0;
|
2012-08-24 18:18:18 +00:00
|
|
|
}
|
2011-12-14 12:57:08 +00:00
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
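/*
 * Reserve address space for all vmas in the execbuf. The list is first
 * reordered so that objects needing a fence or a mappable GGTT binding are
 * pinned ahead of the rest, presumably to keep the scarce mappable aperture
 * from being exhausted by less demanding buffers.
 */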
static int
|
2010-11-10 16:40:20 +00:00
|
|
|
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
|
2013-08-14 09:38:36 +00:00
|
|
|
struct list_head *vmas,
|
2013-01-17 21:23:36 +00:00
|
|
|
bool *need_relocs)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2010-11-25 19:32:06 +00:00
|
|
|
struct drm_i915_gem_object *obj;
|
2013-08-14 09:38:36 +00:00
|
|
|
struct i915_vma *vma;
|
2013-09-11 21:57:50 +00:00
|
|
|
struct i915_address_space *vm;
|
2013-08-14 09:38:36 +00:00
|
|
|
struct list_head ordered_vmas;
|
2012-08-24 18:18:18 +00:00
|
|
|
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
|
|
|
|
int retry;
|
2011-01-10 17:35:37 +00:00
|
|
|
|
2013-09-11 21:57:50 +00:00
|
|
|
if (list_empty(vmas))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
|
|
|
|
|
2013-08-14 09:38:36 +00:00
|
|
|
INIT_LIST_HEAD(&ordered_vmas);
|
|
|
|
while (!list_empty(vmas)) {
|
2011-01-10 17:35:37 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry;
|
|
|
|
bool need_fence, need_mappable;
|
|
|
|
|
2013-08-14 09:38:36 +00:00
|
|
|
vma = list_first_entry(vmas, struct i915_vma, exec_list);
|
|
|
|
obj = vma->obj;
|
|
|
|
entry = vma->exec_entry;
|
2011-01-10 17:35:37 +00:00
|
|
|
|
|
|
|
need_fence =
|
|
|
|
has_fenced_gpu_access &&
|
|
|
|
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
|
|
|
obj->tiling_mode != I915_TILING_NONE;
|
2013-08-14 09:38:36 +00:00
|
|
|
need_mappable = need_fence || need_reloc_mappable(vma);
|
2011-01-10 17:35:37 +00:00
|
|
|
|
|
|
|
if (need_mappable)
|
2013-08-14 09:38:36 +00:00
|
|
|
list_move(&vma->exec_list, &ordered_vmas);
|
2011-01-10 17:35:37 +00:00
|
|
|
else
|
drm/i915: Convert execbuf code to use vmas
In order to transition more of our code over to using a VMA instead of
an <OBJ, VM> pair - we must have the vma accessible at execbuf time. Up
until now, we've only had a VMA when actually binding an object.
The previous patch helped handle the distinction on bound vs. unbound.
This patch will help us catch leaks, and other issues before we actually
shuffle a bunch of stuff around.
This attempts to convert all the execbuf code to speak in vmas. Since
the execbuf code is very self contained it was a nice isolated
conversion.
The meat of the code is about turning eb_objects into eb_vma, and then
wiring up the rest of the code to use vmas instead of obj, vm pairs.
Unfortunately, to do this, we must move the exec_list link from the obj
structure. This list is reused in the eviction code, so we must also
modify the eviction code to make this work.
WARNING: This patch makes an already hotly profiled path slower. The cost is
unavoidable. In reply to this mail, I will attach the extra data.
v2: Release table lock early, and two a 2 phase vma lookup to avoid
having to use a GFP_ATOMIC. (Chris)
v3: s/obj_exec_list/obj_exec_link/
Updates to address
commit 6d2b888569d366beb4be72cacfde41adee2c25e1
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed Aug 7 18:30:54 2013 +0100
drm/i915: List objects allocated from stolen memory in debugfs
v4: Use obj = vma->obj for neatness in some places (Chris)
need_reloc_mappable() should return false if ppgtt (Chris)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Split out prep patches. Also remove a FIXME comment which is
now taken care of.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-14 09:38:36 +00:00
|
|
|
list_move_tail(&vma->exec_list, &ordered_vmas);
|
2011-01-13 11:03:48 +00:00
|
|
|
|
2013-01-17 21:23:36 +00:00
|
|
|
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
|
2011-01-13 11:03:48 +00:00
|
|
|
obj->base.pending_write_domain = 0;
|
2012-07-20 11:41:07 +00:00
|
|
|
obj->pending_fenced_gpu_access = false;
|
2011-01-10 17:35:37 +00:00
|
|
|
}
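	/*
	 * Objects that need a fence or the mappable aperture were moved to
	 * the head of ordered_vmas above; the splice below hands that
	 * ordering to the reservation passes, so the most constrained
	 * objects are reserved first.
	 */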
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		/* From "drm/i915: Track unbound pages" (Chris Wilson): when
		 * dealing with a working set larger than the GATT, or even
		 * the mappable aperture when touching through the GTT, we
		 * end up evicting objects only to rebind them at a new
		 * offset again later.  Moving an object into and out of the
		 * GTT requires clflushing the pages, causing a double-clflush
		 * penalty for rebinding.  To avoid that, pages are tracked as
		 * they are evicted from the GTT and only relinquished on
		 * memory pressure.  That change also reworked this last-hope
		 * eviction: we no longer try to evict only the purgeable
		 * stuff in a first pass, since that is superfluous and only
		 * helps in OOM corner cases, not in fragmented-GTT thrashing
		 * situations.
		 */
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
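/*
 * The retry structure above is worth isolating: one full reservation pass,
 * and on -ENOSPC a wholesale eviction followed by exactly one more attempt.
 * Below is a minimal, standalone userspace sketch of that control flow; the
 * reserve_pass()/evict_everything() helpers are hypothetical stand-ins, not
 * the driver's functions.
 */
#include <errno.h>
#include <stdio.h>

static int space_left;			/* pretend the address space starts full */

static int reserve_pass(void)
{
	return space_left ? 0 : -ENOSPC;	/* fails until room is made */
}

static int evict_everything(void)
{
	space_left = 1;				/* eviction frees room */
	return 0;
}

/* Same shape as the do/while above: at most one eviction, then a final try. */
static int reserve_with_retry(void)
{
	int retry = 0;

	do {
		int ret = reserve_pass();
		if (ret != -ENOSPC || retry++)
			return ret;

		ret = evict_everything();
		if (ret)
			return ret;
	} while (1);
}

int main(void)
{
	printf("reserve_with_retry() = %d\n", reserve_with_retry());
	return 0;
}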
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
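/*
 * The presumed_offset invalidation above has a userspace-visible counterpart:
 * a relocation whose presumed_offset does not match the target's current GTT
 * offset is reprocessed by the kernel.  A hedged sketch of filling in one
 * relocation entry, assuming the drm_i915_gem_relocation_entry layout from
 * the i915 uapi headers of this era; writing -1 mirrors what the slow path
 * above does to force reprocessing on the next execbuffer.
 */
#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

/*
 * Ask the kernel to patch the batch at 'batch_offset' with the GTT address
 * of 'target_handle' plus 'delta'.
 */
void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		uint32_t target_handle, uint64_t batch_offset, uint32_t delta)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;
	reloc->offset = batch_offset;
	reloc->delta = delta;
	reloc->presumed_offset = (uint64_t)-1;	/* unknown: force processing */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;
}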
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}
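	/*
	 * CPU-domain writes were clflushed above; the chipset flush below
	 * pushes them out of the chipset write buffer so the GPU observes
	 * them, and the wmb() orders any write-combined writes made through
	 * the GTT before the ring starts executing.
	 */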

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
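/*
 * The two checks above translate into simple constraints on the submitting
 * side: no undefined flag bits, and batch_start_offset and batch_len both
 * 8-byte aligned.  A hedged userspace-side sketch, assuming the
 * drm_i915_gem_execbuffer2 layout from the i915 uapi headers.
 */
#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

/* Minimal execbuffer2 request that passes i915_gem_check_execbuffer(). */
void init_execbuf(struct drm_i915_gem_execbuffer2 *execbuf,
		  uint64_t buffers_ptr, uint32_t buffer_count,
		  uint32_t batch_len)
{
	memset(execbuf, 0, sizeof(*execbuf));
	execbuf->buffers_ptr = buffers_ptr;
	execbuf->buffer_count = buffer_count;
	execbuf->batch_start_offset = 0;		/* multiple of 8 */
	execbuf->batch_len = (batch_len + 7) & ~7u;	/* pad to 8 bytes */
	execbuf->flags = I915_EXEC_RENDER;		/* no unknown flag bits */
}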
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}
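/*
 * The relocs_max bound above protects the slow path, where every relocation
 * may be copied into one array of total * sizeof(struct
 * drm_i915_gem_relocation_entry) bytes; phrasing the check as a subtraction
 * keeps the running total below that bound without the comparison itself
 * overflowing.  A small standalone illustration of the same guard (the
 * struct below is only a stand-in for the real entry size).
 */
#include <limits.h>
#include <stdio.h>

struct fake_reloc { unsigned long long v[4]; };		/* 32-byte stand-in */

int main(void)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct fake_reloc);
	unsigned counts[2] = { 1000, UINT_MAX / 8 };	/* second is hostile */
	unsigned i;

	for (i = 0; i < 2; i++) {
		if (counts[i] > relocs_max - relocs_total) {
			printf("entry %u rejected: would overflow\n", i);
			continue;
		}
		relocs_total += counts[i];
	}
	printf("accepted total: %u (max %u)\n", relocs_total, relocs_max);
	return 0;
}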
/*
 * From "drm/i915: Get context early in execbuf" (Ben Widawsky):
 *
 * We need to have the address space when reserving space for the objects.
 * Since the address space and context are tied together, and reserve
 * occurs before context switch (for good reason), we must look up our
 * context earlier in the process.  This leaves some room for optimizations
 * where we no longer need to use ctx_id in certain places; that is
 * addressed in a subsequent patch.
 *
 * Important tricky bit: slow relocations during execbuffer drop
 * struct_mutex.  Since we currently access everything via context id, and
 * not the data structure, this is fine, though not desirable.  A later
 * change that looks up the context structure only once via the context ID
 * idr lookup would be exposed to the following:
 *
 *   CTX-A is created, refcount = 1
 *   CTX-A execbuf, mutex dropped
 *   close IOCTL called on CTX-A, refcount = 0
 *   CTX-A resumes in execbuf.
 *
 * Perhaps it would be best to acquire the reference when we get the
 * context, but that is saved for another day.
 */
static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_ring_buffer *ring, const u32 ctx_id)
{
	struct i915_hw_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
|
drm/i915: Get context early in execbuf
We need to have the address space when reserving space for the objects.
Since the address space and context are tied together, and reserve
occurs before context switch (for good reason), we must lookup our
context earlier in the process.
This leaves some room for optimizations where we no longer need to use
ctx_id in certain places. This will be addressed in a subsequent patch.
Important tricky bit:
Because slow relocations during execbuffer drop struct_mutex
Perhaps it would be best to acquire the reference when we get the
context, but I'll save that for another day (note I have written the
patch before, and I found the changes required to be uglier than this).
Note that since we currently access everything via context id, and not
the data structure this is fine, though not desirable. The next change
attempts to get the context only once via the context ID idr lookup, and
as such, the following can happen:
CTX-A is created, refcount = 1
CTX-A execbuf, mutex dropped
close IOCTL called on CTX-A, refcount = 0
CTX-A resumes in execbuf.
v2: Rebased on top of
commit b6359918b885da7c7b58c050674278dbd06020ab
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Wed Oct 30 15:44:16 2013 +0200
drm/i915: add i915_get_reset_stats_ioctl
v3: Rebased on top of
commit 25b3dfc87bff80317d67ddd2cd4cfb91e6fe7d79
Author: Mika Westerberg <mika.westerberg@linux.intel.com>
Date: Tue Nov 12 11:57:30 2013 +0200
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue Nov 26 16:14:33 2013 +0200
drm/i915: check context reset stats before relocations
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-12-06 22:11:21 +00:00
|
|
|
return ctx;
|
2013-11-26 14:14:33 +00:00
|
|
|
|
drm/i915: Get context early in execbuf
We need to have the address space when reserving space for the objects.
Since the address space and context are tied together, and reserve
occurs before context switch (for good reason), we must lookup our
context earlier in the process.
This leaves some room for optimizations where we no longer need to use
ctx_id in certain places. This will be addressed in a subsequent patch.
Important tricky bit:
Because slow relocations during execbuffer drop struct_mutex
Perhaps it would be best to acquire the reference when we get the
context, but I'll save that for another day (note I have written the
patch before, and I found the changes required to be uglier than this).
Note that since we currently access everything via context id, and not
the data structure this is fine, though not desirable. The next change
attempts to get the context only once via the context ID idr lookup, and
as such, the following can happen:
CTX-A is created, refcount = 1
CTX-A execbuf, mutex dropped
close IOCTL called on CTX-A, refcount = 0
CTX-A resumes in execbuf.
v2: Rebased on top of
commit b6359918b885da7c7b58c050674278dbd06020ab
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Wed Oct 30 15:44:16 2013 +0200
drm/i915: add i915_get_reset_stats_ioctl
v3: Rebased on top of
commit 25b3dfc87bff80317d67ddd2cd4cfb91e6fe7d79
Author: Mika Westerberg <mika.westerberg@linux.intel.com>
Date: Tue Nov 12 11:57:30 2013 +0200
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue Nov 26 16:14:33 2013 +0200
drm/i915: check context reset stats before relocations
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-12-06 22:11:21 +00:00
|
|
|
hs = &ctx->hang_stats;
|
2013-11-26 14:14:33 +00:00
|
|
|
if (hs->banned) {
|
|
|
|
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
|
drm/i915: Get context early in execbuf
We need to have the address space when reserving space for the objects.
Since the address space and context are tied together, and reserve
occurs before context switch (for good reason), we must lookup our
context earlier in the process.
This leaves some room for optimizations where we no longer need to use
ctx_id in certain places. This will be addressed in a subsequent patch.
Important tricky bit:
Because slow relocations during execbuffer drop struct_mutex
Perhaps it would be best to acquire the reference when we get the
context, but I'll save that for another day (note I have written the
patch before, and I found the changes required to be uglier than this).
Note that since we currently access everything via context id, and not
the data structure this is fine, though not desirable. The next change
attempts to get the context only once via the context ID idr lookup, and
as such, the following can happen:
CTX-A is created, refcount = 1
CTX-A execbuf, mutex dropped
close IOCTL called on CTX-A, refcount = 0
CTX-A resumes in execbuf.
v2: Rebased on top of
commit b6359918b885da7c7b58c050674278dbd06020ab
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Wed Oct 30 15:44:16 2013 +0200
drm/i915: add i915_get_reset_stats_ioctl
v3: Rebased on top of
commit 25b3dfc87bff80317d67ddd2cd4cfb91e6fe7d79
Author: Mika Westerberg <mika.westerberg@linux.intel.com>
Date: Tue Nov 12 11:57:30 2013 +0200
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue Nov 26 16:14:33 2013 +0200
drm/i915: check context reset stats before relocations
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-12-06 22:11:21 +00:00
|
|
|
return ERR_PTR(-EIO);
|
2013-11-26 14:14:33 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Get context early in execbuf
We need to have the address space when reserving space for the objects.
Since the address space and context are tied together, and reserve
occurs before context switch (for good reason), we must lookup our
context earlier in the process.
This leaves some room for optimizations where we no longer need to use
ctx_id in certain places. This will be addressed in a subsequent patch.
Important tricky bit:
Because slow relocations during execbuffer drop struct_mutex
Perhaps it would be best to acquire the reference when we get the
context, but I'll save that for another day (note I have written the
patch before, and I found the changes required to be uglier than this).
Note that since we currently access everything via context id, and not
the data structure this is fine, though not desirable. The next change
attempts to get the context only once via the context ID idr lookup, and
as such, the following can happen:
CTX-A is created, refcount = 1
CTX-A execbuf, mutex dropped
close IOCTL called on CTX-A, refcount = 0
CTX-A resumes in execbuf.
v2: Rebased on top of
commit b6359918b885da7c7b58c050674278dbd06020ab
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Wed Oct 30 15:44:16 2013 +0200
drm/i915: add i915_get_reset_stats_ioctl
v3: Rebased on top of
commit 25b3dfc87bff80317d67ddd2cd4cfb91e6fe7d79
Author: Mika Westerberg <mika.westerberg@linux.intel.com>
Date: Tue Nov 12 11:57:30 2013 +0200
Author: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue Nov 26 16:14:33 2013 +0200
drm/i915: check context reset stats before relocations
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-12-06 22:11:21 +00:00
|
|
|
return ctx;
|
2013-11-26 14:14:33 +00:00
|
|
|
}
|
|
|
|
|
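/*
 * After the batch has been queued, mark every VMA involved as active on the
 * given ring: update the read/write domain bookkeeping, record the write
 * seqno for objects the GPU will write, and, for written objects that are
 * pinned into the GGTT and may be scanned out, mark the framebuffer busy.
 */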
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;

                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
                        /* check for potential scanout */
                        if (i915_gem_obj_ggtt_bound(obj) &&
                            i915_gem_obj_to_ggtt(obj)->pin_count)
                                intel_mark_fb_busy(obj, ring);
                }

                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
}

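/*
 * Finish the submission: force a full cache flush on the request that is
 * about to be added, then add that request as a breadcrumb so the batch and
 * the objects it used can be retired once the GPU completes it.
 */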
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring,
                                    struct drm_i915_gem_object *obj)
{
        /* Unconditionally force add_request to emit a full flush. */
        ring->gpu_caches_dirty = true;

        /* Add a breadcrumb for the completion of the batch buffer */
        (void)__i915_add_request(ring, file, obj, NULL);
}

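/*
 * Userspace can request (via I915_EXEC_GEN7_SOL_RESET) that the four gen7
 * streamout write-offset registers be zeroed before the batch runs; each
 * GEN7_SO_WRITE_OFFSET(i) register is cleared with a LOAD_REGISTER_IMM.
 * The reset is only performed for the render ring on gen7.
 */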
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
                return 0;

        ret = intel_ring_begin(ring, 4 * 3);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
                intel_ring_emit(ring, 0);
        }

        intel_ring_advance(ring);

        return 0;
}

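/*
 * Main execbuffer path: validate the ioctl arguments, pick the target ring,
 * look up the context and its address space, reserve and relocate all the
 * VMAs named in the exec list, then dispatch the batch and track the objects
 * as active.
 */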
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        struct i915_hw_context *ctx;
        struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start = args->batch_start_offset, exec_len;
        u32 mask, flags;
        int ret, mode, i;
        bool need_relocs;

        if (!i915_gem_check_execbuffer(args))
                return -EINVAL;

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

        flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
                if (!file->is_master || !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                flags |= I915_DISPATCH_SECURE;
        }
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;

        if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
                ring = &dev_priv->ring[RCS];
        else
                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (ring == &dev_priv->ring[RCS] &&
                    mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4)
                                return -EINVAL;

                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;

                        /* The HW changed the meaning on this bit on gen6 */
                        if (INTEL_INFO(dev)->gen >= 6)
                                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        return -EINVAL;
                }

                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        return -EINVAL;
                }

                cliprects = kcalloc(args->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                   to_user_ptr(args->cliprects_ptr),
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

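        /*
         * Look up the context up front, before any relocations: a banned or
         * unknown context fails the ioctl here, and the context also supplies
         * the address space used to reserve the objects below.
         */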
        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                ret = PTR_ERR(ctx);
                goto pre_mutex_err;
        }

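        /*
         * Hold a reference on the context for the rest of the call: the slow
         * relocation path below drops struct_mutex, and without the
         * reference a concurrent context-close ioctl could free the context
         * while this execbuffer is still using it.
         */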
        i915_gem_context_reference(ctx);

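        /*
         * With full PPGTT each context brings its own address space;
         * otherwise every execbuffer works out of the global GTT.
         */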
        vm = ctx->vm;
        if (!USES_FULL_PPGTT(dev))
                vm = &dev_priv->gtt.base;

        eb = eb_create(args);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        /* take note of the batch buffer before we might reorder the lists */
        batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
                ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
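                /*
                 * -EFAULT means the relocation entries could not be handled
                 * by the fast path; fall back to the slow path, which copies
                 * the relocations from userspace and may drop struct_mutex
                 * while doing so.
                 */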
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
                                                                eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
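        /*
         * vma->bind_vma() abstracts the page-table update for this VMA's
         * address space (global GTT vs. PPGTT); here it is used to force a
         * global-GTT binding for secure batches, which are dispatched out of
         * the global GTT.
         */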
        if (flags & I915_DISPATCH_SECURE &&
            !batch_obj->has_global_gtt_mapping) {
                /* When we have multiple VMs, we'll need to make sure that we
                 * allocate space first */
                struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
                BUG_ON(!vma);
                vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
        }

        if (flags & I915_DISPATCH_SECURE)
                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
        else
                exec_start += i915_gem_obj_offset(batch_obj, vm);

        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;

        ret = i915_switch_context(ring, file, ctx);
        if (ret)
                goto err;

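        /*
         * The relative-constants addressing mode lives in INSTPM, a masked
         * register: the high 16 bits of the LRI payload select which bits
         * are updated, hence (mask << 16 | mode).
         */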
        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        goto err;

                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, INSTPM);
                intel_ring_emit(ring, mask << 16 | mode);
                intel_ring_advance(ring);

                dev_priv->relative_constants_mode = mode;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(dev, ring);
                if (ret)
                        goto err;
        }

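        /*
         * With cliprects the batch is dispatched once per clip rectangle,
         * each dispatch preceded by an i915_emit_box() for that rectangle;
         * otherwise the batch is dispatched exactly once.
         */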
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
                                                        flags);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
                                                flags);
                if (ret)
                        goto err;
        }

        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

        i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);

        eb_destroy(eb);

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);

        /* intel_gpu_busy should also get a ref, so it will free when the device
         * is really idle. */
        intel_runtime_pm_put(dev_priv);

        return ret;
}

/*
|
|
|
|
* Legacy execbuffer just creates an exec2 list from the original exec object
|
|
|
|
* list array and passes it to the real function.
|
|
|
|
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
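		/*
		 * The legacy ABI has no per-object flags, so on pre-gen4
		 * hardware conservatively mark every object as potentially
		 * needing a fence register.
		 */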
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
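
/*
 * Illustration only (never compiled): a minimal userspace sketch of the
 * exec2 interface that the legacy wrapper above converts into, as mentioned
 * in its comment.  It assumes the usual userspace includes (<string.h>,
 * <stdint.h>, <sys/ioctl.h>, <drm/i915_drm.h>), an already-open DRM fd and a
 * GEM buffer holding a valid batch; drm_fd, batch_handle and batch_len are
 * hypothetical placeholders, and relocations plus error handling are
 * omitted.  The struct layout, flags and ioctl come from uapi i915_drm.h.
 */
#if 0
static int example_submit_batch(int drm_fd, __u32 batch_handle,
				__u32 batch_len)
{
	struct drm_i915_gem_exec_object2 obj[1];
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(obj, 0, sizeof(obj));
	memset(&execbuf, 0, sizeof(execbuf));

	/* The last entry in the object list is the batch buffer itself. */
	obj[0].handle = batch_handle;

	execbuf.buffers_ptr = (__u64)(uintptr_t)obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(execbuf, 0);	/* default context */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif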

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

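	/*
	 * buffer_count is multiplied by sizeof(*exec2_list) below, so reject
	 * values that would overflow the allocation size.
	 */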
	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

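	/*
	 * Try a cheap kmalloc first, without retrying hard or warning on
	 * failure, and fall back to drm_malloc_ab() for large arrays.
	 */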
	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}