2010-11-25 18:00:26 +00:00
|
|
|
/*
|
2019-05-28 09:29:49 +00:00
|
|
|
* SPDX-License-Identifier: MIT
|
2010-11-25 18:00:26 +00:00
|
|
|
*
|
2019-05-28 09:29:49 +00:00
|
|
|
* Copyright © 2008,2010 Intel Corporation
|
2010-11-25 18:00:26 +00:00
|
|
|
*/
|
|
|
|
|
2018-11-12 06:40:08 +00:00
|
|
|
#include <linux/intel-iommu.h>
|
2019-08-11 08:06:32 +00:00
|
|
|
#include <linux/dma-resv.h>
|
2017-01-27 09:40:08 +00:00
|
|
|
#include <linux/sync_file.h>
|
2016-08-04 15:32:42 +00:00
|
|
|
#include <linux/uaccess.h>
|
|
|
|
|
2017-08-15 14:57:33 +00:00
|
|
|
#include <drm/drm_syncobj.h>
|
2016-08-04 15:32:42 +00:00
|
|
|
|
2019-06-13 08:44:16 +00:00
|
|
|
#include "display/intel_frontbuffer.h"
|
|
|
|
|
2019-05-28 09:29:43 +00:00
|
|
|
#include "gem/i915_gem_ioctls.h"
|
2019-05-28 09:29:49 +00:00
|
|
|
#include "gt/intel_context.h"
|
2020-12-16 13:54:52 +00:00
|
|
|
#include "gt/intel_gpu_commands.h"
|
2019-06-21 07:08:02 +00:00
|
|
|
#include "gt/intel_gt.h"
|
2020-04-30 11:18:12 +00:00
|
|
|
#include "gt/intel_gt_buffer_pool.h"
|
2019-04-25 05:01:43 +00:00
|
|
|
#include "gt/intel_gt_pm.h"
|
2019-10-24 10:03:44 +00:00
|
|
|
#include "gt/intel_ring.h"
|
2019-04-25 05:01:43 +00:00
|
|
|
|
2019-08-06 10:07:30 +00:00
|
|
|
#include "i915_drv.h"
|
2017-02-22 11:40:48 +00:00
|
|
|
#include "i915_gem_clflush.h"
|
2019-05-28 09:29:49 +00:00
|
|
|
#include "i915_gem_context.h"
|
2019-08-06 10:07:30 +00:00
|
|
|
#include "i915_gem_ioctls.h"
|
2019-12-11 23:08:57 +00:00
|
|
|
#include "i915_sw_fence_work.h"
|
2010-11-25 18:00:26 +00:00
|
|
|
#include "i915_trace.h"
|
2020-08-04 08:59:53 +00:00
|
|
|
#include "i915_user_extensions.h"
|
2021-03-23 15:49:52 +00:00
|
|
|
#include "i915_memcpy.h"
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma {
|
|
|
|
struct i915_vma *vma;
|
|
|
|
unsigned int flags;
|
|
|
|
|
|
|
|
/** This vma's place in the execbuf reservation list */
|
|
|
|
struct drm_i915_gem_exec_object2 *exec;
|
|
|
|
struct list_head bind_link;
|
|
|
|
struct list_head reloc_link;
|
|
|
|
|
|
|
|
struct hlist_node node;
|
|
|
|
u32 handle;
|
|
|
|
};
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
enum {
|
|
|
|
FORCE_CPU_RELOC = 1,
|
|
|
|
FORCE_GTT_RELOC,
|
|
|
|
FORCE_GPU_RELOC,
|
|
|
|
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
|
|
|
|
};
|
|
|
|
|
2021-03-23 15:49:59 +00:00
|
|
|
/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
|
|
|
|
#define __EXEC_OBJECT_HAS_PIN BIT(30)
|
|
|
|
#define __EXEC_OBJECT_HAS_FENCE BIT(29)
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#define __EXEC_OBJECT_USERPTR_INIT BIT(28)
|
|
|
|
#define __EXEC_OBJECT_NEEDS_MAP BIT(27)
|
|
|
|
#define __EXEC_OBJECT_NEEDS_BIAS BIT(26)
|
|
|
|
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + */
|
2020-08-19 14:08:44 +00:00
|
|
|
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
#define __EXEC_HAS_RELOC BIT(31)
|
2020-08-19 14:08:52 +00:00
|
|
|
#define __EXEC_ENGINE_PINNED BIT(30)
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#define __EXEC_USERPTR_USED BIT(29)
|
|
|
|
#define __EXEC_INTERNAL_FLAGS (~0u << 29)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
#define UPDATE PIN_OFFSET_FIXED
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
|
|
|
|
#define BATCH_OFFSET_BIAS (256*1024)
|
2013-11-26 11:23:15 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
#define __I915_EXEC_ILLEGAL_FLAGS \
|
2018-08-03 23:24:43 +00:00
|
|
|
(__I915_EXEC_UNKNOWN_FLAGS | \
|
|
|
|
I915_EXEC_CONSTANTS_MASK | \
|
|
|
|
I915_EXEC_RESOURCE_STREAMER)
|
2016-08-02 21:50:38 +00:00
|
|
|
|
2018-06-21 08:01:50 +00:00
|
|
|
/* Catch emission of unexpected errors for CI! */
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
|
|
|
|
#undef EINVAL
|
|
|
|
#define EINVAL ({ \
|
|
|
|
DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
|
|
|
|
22; \
|
|
|
|
})
|
|
|
|
#endif
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/**
|
|
|
|
* DOC: User command execution
|
|
|
|
*
|
|
|
|
* Userspace submits commands to be executed on the GPU as an instruction
|
|
|
|
* stream within a GEM object we call a batchbuffer. This instructions may
|
|
|
|
* refer to other GEM objects containing auxiliary state such as kernels,
|
|
|
|
* samplers, render targets and even secondary batchbuffers. Userspace does
|
|
|
|
* not know where in the GPU memory these objects reside and so before the
|
|
|
|
* batchbuffer is passed to the GPU for execution, those addresses in the
|
|
|
|
* batchbuffer and auxiliary objects are updated. This is known as relocation,
|
|
|
|
* or patching. To try and avoid having to relocate each object on the next
|
|
|
|
* execution, userspace is told the location of those objects in this pass,
|
|
|
|
* but this remains just a hint as the kernel may choose a new location for
|
|
|
|
* any object in the future.
|
|
|
|
*
|
2018-04-06 08:05:57 +00:00
|
|
|
* At the level of talking to the hardware, submitting a batchbuffer for the
|
|
|
|
* GPU to execute is to add content to a buffer from which the HW
|
|
|
|
* command streamer is reading.
|
|
|
|
*
|
|
|
|
* 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
|
|
|
|
* Execlists, this command is not placed on the same buffer as the
|
|
|
|
* remaining items.
|
|
|
|
*
|
|
|
|
* 2. Add a command to invalidate caches to the buffer.
|
|
|
|
*
|
|
|
|
* 3. Add a batchbuffer start command to the buffer; the start command is
|
|
|
|
* essentially a token together with the GPU address of the batchbuffer
|
|
|
|
* to be executed.
|
|
|
|
*
|
|
|
|
* 4. Add a pipeline flush to the buffer.
|
|
|
|
*
|
|
|
|
* 5. Add a memory write command to the buffer to record when the GPU
|
|
|
|
* is done executing the batchbuffer. The memory write writes the
|
|
|
|
* global sequence number of the request, ``i915_request::global_seqno``;
|
|
|
|
* the i915 driver uses the current value in the register to determine
|
|
|
|
* if the GPU has completed the batchbuffer.
|
|
|
|
*
|
|
|
|
* 6. Add a user interrupt command to the buffer. This command instructs
|
|
|
|
* the GPU to issue an interrupt when the command, pipeline flush and
|
|
|
|
* memory write are completed.
|
|
|
|
*
|
|
|
|
* 7. Inform the hardware of the additional commands added to the buffer
|
|
|
|
* (by updating the tail pointer).
|
|
|
|
*
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
* Processing an execbuf ioctl is conceptually split up into a few phases.
|
|
|
|
*
|
|
|
|
* 1. Validation - Ensure all the pointers, handles and flags are valid.
|
|
|
|
* 2. Reservation - Assign GPU address space for every object
|
|
|
|
* 3. Relocation - Update any addresses to point to the final locations
|
|
|
|
* 4. Serialisation - Order the request with respect to its dependencies
|
|
|
|
* 5. Construction - Construct a request to execute the batchbuffer
|
|
|
|
* 6. Submission (at some point in the future execution)
|
|
|
|
*
|
|
|
|
* Reserving resources for the execbuf is the most complicated phase. We
|
|
|
|
* neither want to have to migrate the object in the address space, nor do
|
|
|
|
* we want to have to update any relocations pointing to this object. Ideally,
|
|
|
|
* we want to leave the object where it is and for all the existing relocations
|
|
|
|
* to match. If the object is given a new address, or if userspace thinks the
|
|
|
|
* object is elsewhere, we have to parse all the relocation entries and update
|
|
|
|
* the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that
|
|
|
|
* all the target addresses in all of its objects match the value in the
|
|
|
|
* relocation entries and that they all match the presumed offsets given by the
|
|
|
|
* list of execbuffer objects. Using this knowledge, we know that if we haven't
|
|
|
|
* moved any buffers, all the relocation entries are valid and we can skip
|
|
|
|
* the update. (If userspace is wrong, the likely outcome is an impromptu GPU
|
|
|
|
* hang.) The requirement for using I915_EXEC_NO_RELOC are:
|
|
|
|
*
|
|
|
|
* The addresses written in the objects must match the corresponding
|
|
|
|
* reloc.presumed_offset which in turn must match the corresponding
|
|
|
|
* execobject.offset.
|
|
|
|
*
|
|
|
|
* Any render targets written to in the batch must be flagged with
|
|
|
|
* EXEC_OBJECT_WRITE.
|
|
|
|
*
|
|
|
|
* To avoid stalling, execobject.offset should match the current
|
|
|
|
* address of that object within the active context.
|
|
|
|
*
|
|
|
|
* The reservation is done is multiple phases. First we try and keep any
|
|
|
|
* object already bound in its current location - so as long as meets the
|
|
|
|
* constraints imposed by the new execbuffer. Any object left unbound after the
|
|
|
|
* first pass is then fitted into any available idle space. If an object does
|
|
|
|
* not fit, all objects are removed from the reservation and the process rerun
|
|
|
|
* after sorting the objects into a priority order (more difficult to fit
|
|
|
|
* objects are tried first). Failing that, the entire VM is cleared and we try
|
|
|
|
* to fit the execbuf once last time before concluding that it simply will not
|
|
|
|
* fit.
|
|
|
|
*
|
|
|
|
* A small complication to all of this is that we allow userspace not only to
|
|
|
|
* specify an alignment and a size for the object in the address space, but
|
|
|
|
* we also allow userspace to specify the exact offset. This objects are
|
|
|
|
* simpler to place (the location is known a priori) all we have to do is make
|
|
|
|
* sure the space is available.
|
|
|
|
*
|
|
|
|
* Once all the objects are in place, patching up the buried pointers to point
|
|
|
|
* to the final locations is a fairly simple job of walking over the relocation
|
|
|
|
* entry arrays, looking up the right address and rewriting the value into
|
|
|
|
* the object. Simple! ... The relocation entries are stored in user memory
|
|
|
|
* and so to access them we have to copy them into a local buffer. That copy
|
|
|
|
* has to avoid taking any pagefaults as they may lead back to a GEM object
|
|
|
|
* requiring the struct_mutex (i.e. recursive deadlock). So once again we split
|
|
|
|
* the relocation into multiple passes. First we try to do everything within an
|
|
|
|
* atomic context (avoid the pagefaults) which requires that we never wait. If
|
|
|
|
* we detect that we may wait, or if we need to fault, then we have to fallback
|
|
|
|
* to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
|
|
|
|
* bells yet?) Dropping the mutex means that we lose all the state we have
|
|
|
|
* built up so far for the execbuf and we must reset any global data. However,
|
|
|
|
* we do leave the objects pinned in their final locations - which is a
|
|
|
|
* potential issue for concurrent execbufs. Once we have left the mutex, we can
|
|
|
|
* allocate and copy all the relocation entries into a large array at our
|
|
|
|
* leisure, reacquire the mutex, reclaim all the objects and other state and
|
|
|
|
* then proceed to update any incorrect addresses with the objects.
|
|
|
|
*
|
|
|
|
* As we process the relocation entries, we maintain a record of whether the
|
|
|
|
* object is being written to. Using NORELOC, we expect userspace to provide
|
|
|
|
* this information instead. We also check whether we can skip the relocation
|
|
|
|
* by comparing the expected value inside the relocation entry with the target's
|
|
|
|
* final address. If they differ, we have to map the current object and rewrite
|
|
|
|
* the 4 or 8 byte pointer within.
|
|
|
|
*
|
|
|
|
* Serialising an execbuf is quite simple according to the rules of the GEM
|
|
|
|
* ABI. Execution within each context is ordered by the order of submission.
|
|
|
|
* Writes to any GEM object are in order of submission and are exclusive. Reads
|
|
|
|
* from a GEM object are unordered with respect to other reads, but ordered by
|
|
|
|
* writes. A write submitted after a read cannot occur before the read, and
|
|
|
|
* similarly any read submitted after a write cannot occur before the write.
|
|
|
|
* Writes are ordered between engines such that only one write occurs at any
|
|
|
|
* time (completing any reads beforehand) - using semaphores where available
|
|
|
|
* and CPU serialisation otherwise. Other GEM access obey the same rules, any
|
|
|
|
* write (either via mmaps using set-domain, or via pwrite) must flush all GPU
|
|
|
|
* reads before starting, and any read (either using set-domain or pread) must
|
|
|
|
* flush all GPU writes before starting. (Note we only employ a barrier before,
|
|
|
|
* we currently rely on userspace not concurrently starting a new execution
|
|
|
|
* whilst reading or writing to an object. This may be an advantage or not
|
|
|
|
* depending on how much you trust userspace not to shoot themselves in the
|
|
|
|
* foot.) Serialisation may just result in the request being inserted into
|
|
|
|
* a DAG awaiting its turn, but most simple is to wait on the CPU until
|
|
|
|
* all dependencies are resolved.
|
|
|
|
*
|
|
|
|
* After all of that, is just a matter of closing the request and handing it to
|
|
|
|
* the hardware (well, leaving it in a queue to be executed). However, we also
|
|
|
|
* offer the ability for batchbuffers to be run with elevated privileges so
|
|
|
|
* that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
|
|
|
|
* Before any batch is given extra privileges we first must check that it
|
|
|
|
* contains no nefarious instructions, we check that each instruction is from
|
|
|
|
* our whitelist and all registers are also from an allowed list. We first
|
|
|
|
* copy the user's batchbuffer to a shadow (so that the user doesn't have
|
|
|
|
* access to it, either by the CPU or GPU as we scan it) and then parse each
|
|
|
|
* instruction. If everything is ok, we set a flag telling the hardware to run
|
|
|
|
* the batchbuffer in trusted mode, otherwise the ioctl is rejected.
|
|
|
|
*/
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
struct eb_fence {
|
|
|
|
struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
|
|
|
|
struct dma_fence *dma_fence;
|
|
|
|
u64 value;
|
|
|
|
struct dma_fence_chain *chain_fence;
|
|
|
|
};
|
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
struct i915_execbuffer {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_i915_private *i915; /** i915 backpointer */
|
|
|
|
struct drm_file *file; /** per-file lookup tables and limits */
|
|
|
|
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
|
|
|
|
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *vma;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
struct intel_engine_cs *engine; /** engine to queue the request to */
|
2019-04-25 05:01:43 +00:00
|
|
|
struct intel_context *context; /* logical state for the request */
|
|
|
|
struct i915_gem_context *gem_context; /** caller's context */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
struct i915_request *request; /** our request to build */
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *batch; /** identity of the batch obj/vma */
|
2019-12-11 23:08:56 +00:00
|
|
|
struct i915_vma *trampoline; /** trampoline used for chaining */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/** actual size of execobj[] as we may extend it for the cmdparser */
|
|
|
|
unsigned int buffer_count;
|
|
|
|
|
|
|
|
/** list of vma not yet bound during reservation phase */
|
|
|
|
struct list_head unbound;
|
|
|
|
|
|
|
|
/** list of vma that have execobj.relocation_count */
|
|
|
|
struct list_head relocs;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
struct i915_gem_ww_ctx ww;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/**
|
|
|
|
* Track the most recently used object for relocations, as we
|
|
|
|
* frequently have to perform multiple relocations within the same
|
|
|
|
* obj/page
|
|
|
|
*/
|
2017-06-15 08:14:33 +00:00
|
|
|
struct reloc_cache {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_mm_node node; /** temporary GTT binding */
|
2020-09-08 05:41:17 +00:00
|
|
|
unsigned long vaddr; /** Current kmap address */
|
|
|
|
unsigned long page; /** Currently mapped page index */
|
2021-04-13 05:09:59 +00:00
|
|
|
unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
|
2017-06-15 08:14:33 +00:00
|
|
|
bool use_64bit_reloc : 1;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
bool has_llc : 1;
|
|
|
|
bool has_fence : 1;
|
|
|
|
bool needs_unfenced : 1;
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
struct i915_request *rq;
|
2017-06-16 14:05:24 +00:00
|
|
|
u32 *rq_cmd;
|
|
|
|
unsigned int rq_size;
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *pool;
|
2017-06-15 08:14:33 +00:00
|
|
|
} reloc_cache;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *reloc_pool; /** relocation pool for -EDEADLK handling */
|
2020-08-19 14:08:52 +00:00
|
|
|
struct intel_context *reloc_context;
|
2020-08-19 14:08:48 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
u64 invalid_flags; /** Set of execobj.flags that are invalid */
|
|
|
|
u32 context_flags; /** Set of execobj.flags to insert from the ctx */
|
|
|
|
|
2020-10-15 11:59:54 +00:00
|
|
|
u64 batch_len; /** Length of batch within object */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
u32 batch_start_offset; /** Location within object of batch */
|
|
|
|
u32 batch_flags; /** Flags composed for emit_bb_start() */
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Indicate either the size of the hastable used to resolve
|
|
|
|
* relocation handles, or if negative that we are using a direct
|
|
|
|
* index into the execobj[].
|
|
|
|
*/
|
|
|
|
int lut_size;
|
|
|
|
struct hlist_head *buckets; /** ht for relocation handles */
|
2020-08-04 08:59:53 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
struct eb_fence *fences;
|
|
|
|
unsigned long num_fences;
|
2010-12-08 10:38:14 +00:00
|
|
|
};
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
static int eb_parse(struct i915_execbuffer *eb);
|
2020-08-19 14:08:52 +00:00
|
|
|
static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
|
|
|
|
bool throttle);
|
|
|
|
static void eb_unpin_engine(struct i915_execbuffer *eb);
|
2020-08-19 14:08:47 +00:00
|
|
|
|
2017-08-26 13:56:20 +00:00
|
|
|
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
|
|
|
|
{
|
2018-08-01 16:33:59 +00:00
|
|
|
return intel_engine_requires_cmd_parser(eb->engine) ||
|
2018-08-01 16:45:50 +00:00
|
|
|
(intel_engine_using_cmd_parser(eb->engine) &&
|
|
|
|
eb->args->batch_len);
|
2017-08-26 13:56:20 +00:00
|
|
|
}
|
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
static int eb_create(struct i915_execbuffer *eb)
|
2010-12-08 10:38:14 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
|
|
|
|
unsigned int size = 1 + ilog2(eb->buffer_count);
|
2017-06-16 14:05:16 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* Without a 1:1 association between relocation handles and
|
|
|
|
* the execobject[] index, we instead create a hashtable.
|
|
|
|
* We size it dynamically based on available memory, starting
|
|
|
|
* first with 1:1 assocative hash and scaling back until
|
|
|
|
* the allocation succeeds.
|
|
|
|
*
|
|
|
|
* Later on we use a positive lut_size to indicate we are
|
|
|
|
* using this hashtable, and a negative value to indicate a
|
|
|
|
* direct lookup.
|
|
|
|
*/
|
2017-06-16 14:05:16 +00:00
|
|
|
do {
|
2017-09-01 14:57:28 +00:00
|
|
|
gfp_t flags;
|
2017-06-29 15:04:25 +00:00
|
|
|
|
|
|
|
/* While we can still reduce the allocation size, don't
|
|
|
|
* raise a warning and allow the allocation to fail.
|
|
|
|
* On the last pass though, we want to try as hard
|
|
|
|
* as possible to perform the allocation and warn
|
|
|
|
* if it fails.
|
|
|
|
*/
|
2017-09-13 23:28:29 +00:00
|
|
|
flags = GFP_KERNEL;
|
2017-06-29 15:04:25 +00:00
|
|
|
if (size > 1)
|
|
|
|
flags |= __GFP_NORETRY | __GFP_NOWARN;
|
|
|
|
|
2017-06-16 14:05:16 +00:00
|
|
|
eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
|
2017-06-29 15:04:25 +00:00
|
|
|
flags);
|
2017-06-16 14:05:16 +00:00
|
|
|
if (eb->buckets)
|
|
|
|
break;
|
|
|
|
} while (--size);
|
|
|
|
|
2020-08-19 14:08:44 +00:00
|
|
|
if (unlikely(!size))
|
2017-06-29 15:04:25 +00:00
|
|
|
return -ENOMEM;
|
2013-01-08 10:53:17 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->lut_size = size;
|
2017-06-15 08:14:33 +00:00
|
|
|
} else {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->lut_size = -eb->buffer_count;
|
2017-06-15 08:14:33 +00:00
|
|
|
}
|
2013-01-08 10:53:17 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
return 0;
|
2010-12-08 10:38:14 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static bool
|
|
|
|
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
|
2017-08-16 08:52:06 +00:00
|
|
|
const struct i915_vma *vma,
|
|
|
|
unsigned int flags)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
|
|
|
if (vma->node.size < entry->pad_to_size)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & EXEC_OBJECT_PINNED &&
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
vma->node.start != entry->offset)
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
vma->node.start < BATCH_OFFSET_BIAS)
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
|
2020-12-16 09:29:51 +00:00
|
|
|
(vma->node.start + vma->node.size + 4095) >> 32)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return true;
|
|
|
|
|
2017-10-31 10:36:07 +00:00
|
|
|
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
|
|
|
|
!i915_vma_is_map_and_fenceable(vma))
|
|
|
|
return true;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
|
|
|
|
unsigned int exec_flags)
|
|
|
|
{
|
|
|
|
u64 pin_flags = 0;
|
|
|
|
|
|
|
|
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
|
|
|
|
pin_flags |= PIN_GLOBAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
|
|
|
|
* limit address to the first 4GBs for unflagged objects.
|
|
|
|
*/
|
|
|
|
if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
|
|
|
|
pin_flags |= PIN_ZONE_4G;
|
|
|
|
|
|
|
|
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
|
|
|
|
pin_flags |= PIN_MAPPABLE;
|
|
|
|
|
|
|
|
if (exec_flags & EXEC_OBJECT_PINNED)
|
|
|
|
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
|
|
|
|
else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
|
|
|
|
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
|
|
|
|
|
|
|
|
return pin_flags;
|
|
|
|
}
|
|
|
|
|
2021-03-23 15:49:53 +00:00
|
|
|
static inline int
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb_pin_vma(struct i915_execbuffer *eb,
|
2017-08-16 08:52:06 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry,
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
2020-03-03 20:43:44 +00:00
|
|
|
struct i915_vma *vma = ev->vma;
|
2017-08-16 08:52:06 +00:00
|
|
|
u64 pin_flags;
|
2021-03-23 15:49:53 +00:00
|
|
|
int err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2017-06-16 14:05:21 +00:00
|
|
|
if (vma->node.size)
|
2017-08-16 08:52:06 +00:00
|
|
|
pin_flags = vma->node.start;
|
2017-06-16 14:05:21 +00:00
|
|
|
else
|
2017-08-16 08:52:06 +00:00
|
|
|
pin_flags = entry->offset & PIN_OFFSET_MASK;
|
2017-06-16 14:05:21 +00:00
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
|
2020-03-03 20:43:44 +00:00
|
|
|
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
|
2017-08-16 08:52:06 +00:00
|
|
|
pin_flags |= PIN_GLOBAL;
|
2017-06-16 14:05:21 +00:00
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
/* Attempt to reuse the current location if available */
|
2021-03-23 15:49:53 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
|
|
|
|
if (err == -EDEADLK)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (unlikely(err)) {
|
2020-04-01 19:41:35 +00:00
|
|
|
if (entry->flags & EXEC_OBJECT_PINNED)
|
2021-03-23 15:49:53 +00:00
|
|
|
return err;
|
2020-04-01 19:41:35 +00:00
|
|
|
|
|
|
|
/* Failing that pick any _free_ space if suitable */
|
2021-03-23 15:49:53 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww,
|
2020-08-19 14:08:54 +00:00
|
|
|
entry->pad_to_size,
|
|
|
|
entry->alignment,
|
|
|
|
eb_pin_flags(entry, ev->flags) |
|
2021-03-23 15:49:53 +00:00
|
|
|
PIN_USER | PIN_NOEVICT);
|
|
|
|
if (unlikely(err))
|
|
|
|
return err;
|
2020-04-01 19:41:35 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
|
2021-03-23 15:49:53 +00:00
|
|
|
err = i915_vma_pin_fence(vma);
|
|
|
|
if (unlikely(err)) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
i915_vma_unpin(vma);
|
2021-03-23 15:49:53 +00:00
|
|
|
return err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2017-10-09 08:43:56 +00:00
|
|
|
if (vma->fence)
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_PIN;
|
2021-03-23 15:49:53 +00:00
|
|
|
if (eb_vma_misplaced(entry, vma, ev->flags))
|
|
|
|
return -EBADSLT;
|
|
|
|
|
|
|
|
return 0;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:44 +00:00
|
|
|
static inline void
|
|
|
|
eb_unreserve_vma(struct eb_vma *ev)
|
|
|
|
{
|
|
|
|
if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
|
|
|
|
return;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
|
|
|
|
__i915_vma_unpin_fence(ev->vma);
|
|
|
|
|
|
|
|
__i915_vma_unpin(ev->vma);
|
2020-08-19 14:08:44 +00:00
|
|
|
ev->flags &= ~__EXEC_OBJECT_RESERVED;
|
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static int
|
|
|
|
eb_validate_vma(struct i915_execbuffer *eb,
|
|
|
|
struct drm_i915_gem_exec_object2 *entry,
|
|
|
|
struct i915_vma *vma)
|
2010-12-08 10:38:14 +00:00
|
|
|
{
|
2021-03-17 23:40:11 +00:00
|
|
|
/* Relocations are disallowed for all platforms after TGL-LP. This
|
|
|
|
* also covers all platforms with local memory.
|
|
|
|
*/
|
|
|
|
if (entry->relocation_count &&
|
2021-06-05 15:53:54 +00:00
|
|
|
GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
|
2021-03-17 23:40:11 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (unlikely(entry->flags & eb->invalid_flags))
|
|
|
|
return -EINVAL;
|
2017-06-15 08:14:34 +00:00
|
|
|
|
2020-03-05 20:35:34 +00:00
|
|
|
if (unlikely(entry->alignment &&
|
|
|
|
!is_power_of_2_u64(entry->alignment)))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Offset can be used as input (EXEC_OBJECT_PINNED), reject
|
|
|
|
* any non-page-aligned or non-canonical addresses.
|
|
|
|
*/
|
|
|
|
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
|
2018-10-25 09:18:23 +00:00
|
|
|
entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* pad_to_size was once a reserved field, so sanitize it */
|
|
|
|
if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
|
|
|
|
if (unlikely(offset_in_page(entry->pad_to_size)))
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
|
|
|
entry->pad_to_size = 0;
|
2017-06-15 08:14:34 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* From drm_mm perspective address space is continuous,
|
|
|
|
* so from this point we're always using non-canonical
|
|
|
|
* form internally.
|
|
|
|
*/
|
|
|
|
entry->offset = gen8_noncanonical_addr(entry->offset);
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (!eb->reloc_cache.has_fence) {
|
|
|
|
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
|
|
|
|
} else {
|
|
|
|
if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
|
|
|
|
eb->reloc_cache.needs_unfenced) &&
|
|
|
|
i915_gem_object_is_tiled(vma->obj))
|
|
|
|
entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(entry->flags & EXEC_OBJECT_PINNED))
|
|
|
|
entry->flags |= eb->context_flags;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return 0;
|
2010-12-08 10:38:14 +00:00
|
|
|
}
|
|
|
|
|
2020-03-03 20:43:45 +00:00
|
|
|
static void
|
2018-06-10 19:43:09 +00:00
|
|
|
eb_add_vma(struct i915_execbuffer *eb,
|
|
|
|
unsigned int i, unsigned batch_idx,
|
|
|
|
struct i915_vma *vma)
|
2016-08-04 15:32:31 +00:00
|
|
|
{
|
2017-08-16 08:52:06 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
ev->vma = vma;
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->exec = entry;
|
|
|
|
ev->flags = entry->flags;
|
|
|
|
|
2017-06-29 15:04:25 +00:00
|
|
|
if (eb->lut_size > 0) {
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->handle = entry->handle;
|
|
|
|
hlist_add_head(&ev->node,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
&eb->buckets[hash_32(entry->handle,
|
|
|
|
eb->lut_size)]);
|
2017-06-16 14:05:16 +00:00
|
|
|
}
|
2016-08-04 15:32:31 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (entry->relocation_count)
|
2020-03-03 20:43:44 +00:00
|
|
|
list_add_tail(&ev->reloc_link, &eb->relocs);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2018-06-10 19:43:09 +00:00
|
|
|
/*
|
|
|
|
* SNA is doing fancy tricks with compressing batch buffers, which leads
|
|
|
|
* to negative relocation deltas. Usually that works out ok since the
|
|
|
|
* relocate address is still positive, except when the batch is placed
|
|
|
|
* very low in the GTT. Ensure this doesn't happen.
|
|
|
|
*
|
|
|
|
* Note that actual hangs have only been observed on gen7, but for
|
|
|
|
* paranoia do it everywhere.
|
|
|
|
*/
|
|
|
|
if (i == batch_idx) {
|
2018-06-21 07:32:05 +00:00
|
|
|
if (entry->relocation_count &&
|
2020-03-03 20:43:44 +00:00
|
|
|
!(ev->flags & EXEC_OBJECT_PINNED))
|
|
|
|
ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
2018-06-10 19:43:09 +00:00
|
|
|
if (eb->reloc_cache.has_fence)
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
|
2018-06-10 19:43:09 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
eb->batch = ev;
|
2018-06-10 19:43:09 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
static inline int use_cpu_reloc(const struct reloc_cache *cache,
|
|
|
|
const struct drm_i915_gem_object *obj)
|
|
|
|
{
|
|
|
|
if (!i915_gem_object_has_struct_page(obj))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return (cache->has_llc ||
|
|
|
|
obj->cache_dirty ||
|
|
|
|
obj->cache_level != I915_CACHE_NONE);
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
static int eb_reserve_vma(struct i915_execbuffer *eb,
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev,
|
2020-03-03 20:43:43 +00:00
|
|
|
u64 pin_flags)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
2020-03-03 20:43:44 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
|
|
|
struct i915_vma *vma = ev->vma;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
|
|
|
|
2020-03-03 20:43:45 +00:00
|
|
|
if (drm_mm_node_allocated(&vma->node) &&
|
|
|
|
eb_vma_misplaced(entry, vma, ev->flags)) {
|
|
|
|
err = i915_vma_unbind(vma);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww,
|
2017-08-16 08:52:06 +00:00
|
|
|
entry->pad_to_size, entry->alignment,
|
2020-04-01 19:41:35 +00:00
|
|
|
eb_pin_flags(entry, ev->flags) | pin_flags);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (entry->offset != vma->node.start) {
|
|
|
|
entry->offset = vma->node.start | UPDATE;
|
|
|
|
eb->args->flags |= __EXEC_HAS_RELOC;
|
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
|
2017-10-09 08:43:56 +00:00
|
|
|
err = i915_vma_pin_fence(vma);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (unlikely(err)) {
|
|
|
|
i915_vma_unpin(vma);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-10-09 08:43:56 +00:00
|
|
|
if (vma->fence)
|
2020-04-01 19:41:35 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_PIN;
|
2020-03-03 20:43:44 +00:00
|
|
|
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
|
2017-07-21 14:50:35 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_reserve(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
2020-03-03 20:43:43 +00:00
|
|
|
unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct list_head last;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
unsigned int i, pass;
|
2020-03-06 07:16:14 +00:00
|
|
|
int err = 0;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempt to pin all of the buffers into the GTT.
|
|
|
|
* This is done in 3 phases:
|
|
|
|
*
|
|
|
|
* 1a. Unbind all objects that do not match the GTT constraints for
|
|
|
|
* the execbuffer (fenceable, mappable, alignment etc).
|
|
|
|
* 1b. Increment pin count for already bound objects.
|
|
|
|
* 2. Bind new objects.
|
|
|
|
* 3. Decrement pin count.
|
|
|
|
*
|
|
|
|
* This avoid unnecessary unbinding of later objects in order to make
|
|
|
|
* room for the earlier objects *unless* we need to defragment.
|
|
|
|
*/
|
|
|
|
pass = 0;
|
|
|
|
do {
|
2020-03-03 20:43:44 +00:00
|
|
|
list_for_each_entry(ev, &eb->unbound, bind_link) {
|
|
|
|
err = eb_reserve_vma(eb, ev, pin_flags);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
if (err != -ENOSPC)
|
2020-08-19 14:08:48 +00:00
|
|
|
return err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/* Resort *all* the objects into priority order */
|
|
|
|
INIT_LIST_HEAD(&eb->unbound);
|
|
|
|
INIT_LIST_HEAD(&last);
|
|
|
|
for (i = 0; i < count; i++) {
|
2020-03-03 20:43:44 +00:00
|
|
|
unsigned int flags;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
ev = &eb->vma[i];
|
|
|
|
flags = ev->flags;
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & EXEC_OBJECT_PINNED &&
|
|
|
|
flags & __EXEC_OBJECT_HAS_PIN)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
continue;
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
eb_unreserve_vma(ev);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & EXEC_OBJECT_PINNED)
|
2018-09-12 10:11:33 +00:00
|
|
|
/* Pinned must have their slot */
|
2020-03-03 20:43:44 +00:00
|
|
|
list_add(&ev->bind_link, &eb->unbound);
|
2017-08-16 08:52:06 +00:00
|
|
|
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
|
2018-09-12 10:11:33 +00:00
|
|
|
/* Map require the lowest 256MiB (aperture) */
|
2020-03-03 20:43:44 +00:00
|
|
|
list_add_tail(&ev->bind_link, &eb->unbound);
|
2018-09-12 10:11:33 +00:00
|
|
|
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
|
|
|
|
/* Prioritise 4GiB region for restricted bo */
|
2020-03-03 20:43:44 +00:00
|
|
|
list_add(&ev->bind_link, &last);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
else
|
2020-03-03 20:43:44 +00:00
|
|
|
list_add_tail(&ev->bind_link, &last);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
list_splice_tail(&last, &eb->unbound);
|
|
|
|
|
|
|
|
switch (pass++) {
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 1:
|
|
|
|
/* Too fragmented, unbind everything and retry */
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
mutex_lock(&eb->context->vm->mutex);
|
2019-07-30 14:32:09 +00:00
|
|
|
err = i915_gem_evict_vm(eb->context->vm);
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
mutex_unlock(&eb->context->vm->mutex);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err)
|
2020-08-19 14:08:48 +00:00
|
|
|
return err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2020-08-19 14:08:48 +00:00
|
|
|
return -ENOSPC;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
2020-03-03 20:43:43 +00:00
|
|
|
|
|
|
|
pin_flags = PIN_USER;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
} while (1);
|
2017-06-16 14:05:16 +00:00
|
|
|
}
|
2016-08-04 15:32:31 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
|
|
|
|
{
|
2017-06-16 14:05:23 +00:00
|
|
|
if (eb->args->flags & I915_EXEC_BATCH_FIRST)
|
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return eb->buffer_count - 1;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_select_context(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
struct i915_gem_context *ctx;
|
|
|
|
|
|
|
|
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
|
2017-06-20 11:05:47 +00:00
|
|
|
if (unlikely(!ctx))
|
|
|
|
return -ENOENT;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2019-04-25 05:01:43 +00:00
|
|
|
eb->gem_context = ctx;
|
2019-10-04 13:40:09 +00:00
|
|
|
if (rcu_access_pointer(ctx->vm))
|
2018-09-01 09:24:51 +00:00
|
|
|
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
eb->context_flags = 0;
|
2018-09-11 13:22:06 +00:00
|
|
|
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static int __eb_add_lut(struct i915_execbuffer *eb,
|
|
|
|
u32 handle, struct i915_vma *vma)
|
2013-01-08 10:53:14 +00:00
|
|
|
{
|
2020-03-23 09:28:41 +00:00
|
|
|
struct i915_gem_context *ctx = eb->gem_context;
|
|
|
|
struct i915_lut_handle *lut;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2013-01-08 10:53:14 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
lut = i915_lut_handle_alloc();
|
|
|
|
if (unlikely(!lut))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
i915_vma_get(vma);
|
|
|
|
if (!atomic_fetch_inc(&vma->open_count))
|
|
|
|
i915_vma_reopen(vma);
|
|
|
|
lut->handle = handle;
|
|
|
|
lut->ctx = ctx;
|
|
|
|
|
|
|
|
/* Check that the context hasn't been closed in the meantime */
|
|
|
|
err = -EINTR;
|
2020-07-03 00:43:06 +00:00
|
|
|
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
|
|
|
|
struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
|
|
|
|
|
|
|
|
if (unlikely(vm && vma->vm != vm))
|
|
|
|
err = -EAGAIN; /* user racing with ctx set-vm */
|
|
|
|
else if (likely(!i915_gem_context_is_closed(ctx)))
|
2020-03-23 09:28:41 +00:00
|
|
|
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
|
2020-07-03 00:43:06 +00:00
|
|
|
else
|
|
|
|
err = -ENOENT;
|
2020-03-23 09:28:41 +00:00
|
|
|
if (err == 0) { /* And nor has this handle */
|
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
|
|
|
|
2020-07-01 08:44:39 +00:00
|
|
|
spin_lock(&obj->lut_lock);
|
2020-03-23 09:28:41 +00:00
|
|
|
if (idr_find(&eb->file->object_idr, handle) == obj) {
|
|
|
|
list_add(&lut->obj_link, &obj->lut_list);
|
|
|
|
} else {
|
|
|
|
radix_tree_delete(&ctx->handles_vma, handle);
|
|
|
|
err = -ENOENT;
|
|
|
|
}
|
2020-07-01 08:44:39 +00:00
|
|
|
spin_unlock(&obj->lut_lock);
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
2020-07-03 00:43:06 +00:00
|
|
|
mutex_unlock(&ctx->lut_mutex);
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
|
|
|
if (unlikely(err))
|
|
|
|
goto err;
|
2020-03-03 20:43:45 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
return 0;
|
2017-06-15 08:14:34 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
err:
|
2020-04-22 19:05:58 +00:00
|
|
|
i915_vma_close(vma);
|
2020-03-23 09:28:41 +00:00
|
|
|
i915_vma_put(vma);
|
|
|
|
i915_lut_handle_free(lut);
|
|
|
|
return err;
|
|
|
|
}
|
2018-06-10 19:43:09 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
|
|
|
|
{
|
2020-07-03 00:43:06 +00:00
|
|
|
struct i915_address_space *vm = eb->context->vm;
|
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
do {
|
|
|
|
struct drm_i915_gem_object *obj;
|
2017-08-16 08:52:07 +00:00
|
|
|
struct i915_vma *vma;
|
2020-03-23 09:28:41 +00:00
|
|
|
int err;
|
2017-06-16 14:05:16 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
|
2020-07-03 00:43:06 +00:00
|
|
|
if (likely(vma && vma->vm == vm))
|
2020-03-23 09:28:41 +00:00
|
|
|
vma = i915_vma_tryget(vma);
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (likely(vma))
|
|
|
|
return vma;
|
2017-06-16 14:05:16 +00:00
|
|
|
|
2017-08-16 08:52:07 +00:00
|
|
|
obj = i915_gem_object_lookup(eb->file, handle);
|
2020-03-23 09:28:41 +00:00
|
|
|
if (unlikely(!obj))
|
|
|
|
return ERR_PTR(-ENOENT);
|
2013-01-08 10:53:14 +00:00
|
|
|
|
2020-07-03 00:43:06 +00:00
|
|
|
vma = i915_vma_instance(obj, vm, NULL);
|
2019-02-21 02:08:19 +00:00
|
|
|
if (IS_ERR(vma)) {
|
2020-03-23 09:28:41 +00:00
|
|
|
i915_gem_object_put(obj);
|
|
|
|
return vma;
|
drm/i915: Convert execbuf code to use vmas
In order to transition more of our code over to using a VMA instead of
an <OBJ, VM> pair - we must have the vma accessible at execbuf time. Up
until now, we've only had a VMA when actually binding an object.
The previous patch helped handle the distinction on bound vs. unbound.
This patch will help us catch leaks, and other issues before we actually
shuffle a bunch of stuff around.
This attempts to convert all the execbuf code to speak in vmas. Since
the execbuf code is very self contained it was a nice isolated
conversion.
The meat of the code is about turning eb_objects into eb_vma, and then
wiring up the rest of the code to use vmas instead of obj, vm pairs.
Unfortunately, to do this, we must move the exec_list link from the obj
structure. This list is reused in the eviction code, so we must also
modify the eviction code to make this work.
WARNING: This patch makes an already hotly profiled path slower. The cost is
unavoidable. In reply to this mail, I will attach the extra data.
v2: Release table lock early, and two a 2 phase vma lookup to avoid
having to use a GFP_ATOMIC. (Chris)
v3: s/obj_exec_list/obj_exec_link/
Updates to address
commit 6d2b888569d366beb4be72cacfde41adee2c25e1
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed Aug 7 18:30:54 2013 +0100
drm/i915: List objects allocated from stolen memory in debugfs
v4: Use obj = vma->obj for neatness in some places (Chris)
need_reloc_mappable() should return false if ppgtt (Chris)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Split out prep patches. Also remove a FIXME comment which is
now taken care of.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-14 09:38:36 +00:00
|
|
|
}
|
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
err = __eb_add_lut(eb, handle, vma);
|
|
|
|
if (likely(!err))
|
|
|
|
return vma;
|
2017-08-16 08:52:08 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
i915_gem_object_put(obj);
|
|
|
|
if (err != -EEXIST)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
} while (1);
|
|
|
|
}
|
2017-06-16 14:05:16 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static int eb_lookup_vmas(struct i915_execbuffer *eb)
|
|
|
|
{
|
2020-08-19 14:08:47 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-03-23 09:28:41 +00:00
|
|
|
unsigned int batch = eb_batch_index(eb);
|
|
|
|
unsigned int i;
|
|
|
|
int err = 0;
|
2019-06-06 11:23:20 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
INIT_LIST_HEAD(&eb->relocs);
|
|
|
|
|
|
|
|
for (i = 0; i < eb->buffer_count; i++) {
|
|
|
|
struct i915_vma *vma;
|
|
|
|
|
|
|
|
vma = eb_lookup_vma(eb, eb->exec[i].handle);
|
|
|
|
if (IS_ERR(vma)) {
|
|
|
|
err = PTR_ERR(vma);
|
2020-08-19 14:08:47 +00:00
|
|
|
goto err;
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
2017-08-16 08:52:08 +00:00
|
|
|
|
2020-03-03 20:43:45 +00:00
|
|
|
err = eb_validate_vma(eb, &eb->exec[i], vma);
|
2020-03-23 09:28:41 +00:00
|
|
|
if (unlikely(err)) {
|
|
|
|
i915_vma_put(vma);
|
2020-08-19 14:08:47 +00:00
|
|
|
goto err;
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
2017-06-16 14:05:20 +00:00
|
|
|
|
2020-03-03 20:43:45 +00:00
|
|
|
eb_add_vma(eb, i, batch, vma);
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
|
|
|
|
if (i915_gem_object_is_userptr(vma->obj)) {
|
|
|
|
err = i915_gem_object_userptr_submit_init(vma->obj);
|
|
|
|
if (err) {
|
|
|
|
if (i + 1 < eb->buffer_count) {
|
|
|
|
/*
|
|
|
|
* Execbuffer code expects last vma entry to be NULL,
|
|
|
|
* since we already initialized this entry,
|
|
|
|
* set the next value to NULL or we mess up
|
|
|
|
* cleanup handling.
|
|
|
|
*/
|
|
|
|
eb->vma[i + 1].vma = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
|
|
|
|
eb->args->flags |= __EXEC_USERPTR_USED;
|
|
|
|
}
|
2017-06-16 14:05:16 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
|
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"Attempting to use self-modifying batch buffer\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (range_overflows_t(u64,
|
|
|
|
eb->batch_start_offset, eb->batch_len,
|
|
|
|
eb->batch->vma->size)) {
|
|
|
|
drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (eb->batch_len == 0)
|
|
|
|
eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
|
2020-10-15 11:59:54 +00:00
|
|
|
if (unlikely(eb->batch_len == 0)) { /* impossible! */
|
|
|
|
drm_dbg(&i915->drm, "Invalid batch length\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2020-08-19 14:08:47 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
2020-03-03 20:43:44 +00:00
|
|
|
eb->vma[i].vma = NULL;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
2013-01-08 10:53:14 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static int eb_validate_vmas(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&eb->unbound);
|
|
|
|
|
|
|
|
for (i = 0; i < eb->buffer_count; i++) {
|
|
|
|
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
|
|
|
|
err = i915_gem_object_lock(vma->obj, &eb->ww);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2021-03-23 15:49:53 +00:00
|
|
|
err = eb_pin_vma(eb, entry, ev);
|
|
|
|
if (err == -EDEADLK)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (!err) {
|
2020-08-19 14:08:48 +00:00
|
|
|
if (entry->offset != vma->node.start) {
|
|
|
|
entry->offset = vma->node.start | UPDATE;
|
|
|
|
eb->args->flags |= __EXEC_HAS_RELOC;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
eb_unreserve_vma(ev);
|
|
|
|
|
|
|
|
list_add_tail(&ev->bind_link, &eb->unbound);
|
|
|
|
if (drm_mm_node_allocated(&vma->node)) {
|
|
|
|
err = i915_vma_unbind(vma);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-23 15:49:59 +00:00
|
|
|
if (!(ev->flags & EXEC_OBJECT_WRITE)) {
|
|
|
|
err = dma_resv_reserve_shared(vma->resv, 1);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
|
|
|
|
eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!list_empty(&eb->unbound))
|
|
|
|
return eb_reserve(eb);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
static struct eb_vma *
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
|
2010-12-08 10:38:14 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (eb->lut_size < 0) {
|
|
|
|
if (handle >= -eb->lut_size)
|
2013-01-08 10:53:17 +00:00
|
|
|
return NULL;
|
2020-03-03 20:43:44 +00:00
|
|
|
return &eb->vma[handle];
|
2013-01-08 10:53:17 +00:00
|
|
|
} else {
|
|
|
|
struct hlist_head *head;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev;
|
2010-12-08 10:38:14 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
head = &eb->buckets[hash_32(handle, eb->lut_size)];
|
2020-03-03 20:43:44 +00:00
|
|
|
hlist_for_each_entry(ev, head, node) {
|
|
|
|
if (ev->handle == handle)
|
|
|
|
return ev;
|
2013-01-08 10:53:17 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-12-08 10:38:14 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
|
2020-08-19 14:08:44 +00:00
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
|
|
|
|
if (!vma)
|
|
|
|
break;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
eb_unreserve_vma(ev);
|
2020-08-19 14:08:44 +00:00
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
|
|
|
|
ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
|
|
|
|
i915_gem_object_userptr_submit_fini(vma->obj);
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (final)
|
|
|
|
i915_vma_put(vma);
|
2020-08-19 14:08:44 +00:00
|
|
|
}
|
2020-08-19 14:08:52 +00:00
|
|
|
|
|
|
|
eb_unpin_engine(eb);
|
2020-08-19 14:08:44 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static void eb_destroy(const struct i915_execbuffer *eb)
|
2015-12-29 17:24:52 +00:00
|
|
|
{
|
2017-06-16 14:05:24 +00:00
|
|
|
GEM_BUG_ON(eb->reloc_cache.rq);
|
|
|
|
|
2017-06-29 15:04:25 +00:00
|
|
|
if (eb->lut_size > 0)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
kfree(eb->buckets);
|
2015-12-29 17:24:52 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static inline u64
|
2016-08-18 16:16:52 +00:00
|
|
|
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const struct i915_vma *target)
|
2015-12-29 17:24:52 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return gen8_canonical_addr((int)reloc->delta + target->node.start);
|
2015-12-29 17:24:52 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static void reloc_cache_clear(struct reloc_cache *cache)
|
|
|
|
{
|
|
|
|
cache->rq = NULL;
|
|
|
|
cache->rq_cmd = NULL;
|
|
|
|
cache->pool = NULL;
|
|
|
|
cache->rq_size = 0;
|
|
|
|
}
|
|
|
|
|
2016-08-18 16:16:52 +00:00
|
|
|
static void reloc_cache_init(struct reloc_cache *cache,
|
|
|
|
struct drm_i915_private *i915)
|
2013-08-21 16:10:51 +00:00
|
|
|
{
|
2020-09-08 05:41:17 +00:00
|
|
|
cache->page = -1;
|
|
|
|
cache->vaddr = 0;
|
2016-11-03 08:39:46 +00:00
|
|
|
/* Must be a variable in the struct to allow GCC to unroll. */
|
2021-04-13 05:09:59 +00:00
|
|
|
cache->graphics_ver = GRAPHICS_VER(i915);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
cache->has_llc = HAS_LLC(i915);
|
2016-11-03 08:39:46 +00:00
|
|
|
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
|
2021-04-13 05:09:59 +00:00
|
|
|
cache->has_fence = cache->graphics_ver < 4;
|
2017-06-16 14:05:24 +00:00
|
|
|
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
|
2019-10-03 21:00:59 +00:00
|
|
|
cache->node.flags = 0;
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_clear(cache);
|
2016-08-18 16:16:52 +00:00
|
|
|
}
|
2013-08-21 16:10:51 +00:00
|
|
|
|
2020-09-08 05:41:43 +00:00
|
|
|
static inline void *unmask_page(unsigned long p)
|
|
|
|
{
|
|
|
|
return (void *)(uintptr_t)(p & PAGE_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int unmask_flags(unsigned long p)
|
|
|
|
{
|
|
|
|
return p & ~PAGE_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define KMAP 0x4 /* after CLFLUSH_FLAGS */
|
|
|
|
|
|
|
|
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 =
|
|
|
|
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
|
|
|
|
return &i915->ggtt;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache *cache)
|
drm/i915/gem: Use chained reloc batches
The ring is a precious resource: we anticipate to only use a few hundred
bytes for a request, and only try to reserve that before we start. If we
go beyond our guess in building the request, then instead of waiting at
the start of execbuf before we hold any locks or other resources, we
may trigger a wait inside a critical region. One example is in using gpu
relocations, where currently we emit a new MI_BB_START from the ring
every time we overflow a page of relocation entries. However, instead of
insert the command into the precious ring, we can chain the next page of
relocation entries as MI_BB_START from the end of the previous.
v2: Delay the emit_bb_start until after all the chained vma
synchronisation is complete. Since the buffer pool batches are idle, this
_should_ be a no-op, but one day we may some fancy async GPU bindings
for new vma!
v3: Use pool/batch consitently, once we start thinking in terms of the
batch vma, use batch->obj.
v4: Explain the magic number 4.
Tvrtko spotted that we lose propagation of the error for failing to
submit the relocation request; that's easier to fix up in the next
patch.
Testcase: igt/gem_exec_reloc/basic-many-active
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200501192945.22215-1-chris@chris-wilson.co.uk
2020-05-01 19:29:43 +00:00
|
|
|
{
|
2020-08-19 14:08:48 +00:00
|
|
|
if (!cache->pool)
|
|
|
|
return;
|
drm/i915/gem: Use chained reloc batches
The ring is a precious resource: we anticipate to only use a few hundred
bytes for a request, and only try to reserve that before we start. If we
go beyond our guess in building the request, then instead of waiting at
the start of execbuf before we hold any locks or other resources, we
may trigger a wait inside a critical region. One example is in using gpu
relocations, where currently we emit a new MI_BB_START from the ring
every time we overflow a page of relocation entries. However, instead of
insert the command into the precious ring, we can chain the next page of
relocation entries as MI_BB_START from the end of the previous.
v2: Delay the emit_bb_start until after all the chained vma
synchronisation is complete. Since the buffer pool batches are idle, this
_should_ be a no-op, but one day we may some fancy async GPU bindings
for new vma!
v3: Use pool/batch consitently, once we start thinking in terms of the
batch vma, use batch->obj.
v4: Explain the magic number 4.
Tvrtko spotted that we lose propagation of the error for failing to
submit the relocation request; that's easier to fix up in the next
patch.
Testcase: igt/gem_exec_reloc/basic-many-active
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200501192945.22215-1-chris@chris-wilson.co.uk
2020-05-01 19:29:43 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
/*
|
|
|
|
* This is a bit nasty, normally we keep objects locked until the end
|
|
|
|
* of execbuffer, but we already submit this, and have to unlock before
|
|
|
|
* dropping the reference. Fortunately we can only hold 1 pool node at
|
|
|
|
* a time, so this should be harmless.
|
|
|
|
*/
|
|
|
|
i915_gem_ww_unlock_single(cache->pool->obj);
|
|
|
|
intel_gt_buffer_pool_put(cache->pool);
|
|
|
|
cache->pool = NULL;
|
drm/i915/gem: Use chained reloc batches
The ring is a precious resource: we anticipate to only use a few hundred
bytes for a request, and only try to reserve that before we start. If we
go beyond our guess in building the request, then instead of waiting at
the start of execbuf before we hold any locks or other resources, we
may trigger a wait inside a critical region. One example is in using gpu
relocations, where currently we emit a new MI_BB_START from the ring
every time we overflow a page of relocation entries. However, instead of
insert the command into the precious ring, we can chain the next page of
relocation entries as MI_BB_START from the end of the previous.
v2: Delay the emit_bb_start until after all the chained vma
synchronisation is complete. Since the buffer pool batches are idle, this
_should_ be a no-op, but one day we may some fancy async GPU bindings
for new vma!
v3: Use pool/batch consitently, once we start thinking in terms of the
batch vma, use batch->obj.
v4: Explain the magic number 4.
Tvrtko spotted that we lose propagation of the error for failing to
submit the relocation request; that's easier to fix up in the next
patch.
Testcase: igt/gem_exec_reloc/basic-many-active
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200501192945.22215-1-chris@chris-wilson.co.uk
2020-05-01 19:29:43 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
|
2017-06-16 14:05:24 +00:00
|
|
|
{
|
2020-08-19 14:08:42 +00:00
|
|
|
struct drm_i915_gem_object *obj = cache->rq->batch->obj;
|
drm/i915: Flush pages on acquisition
When we return pages to the system, we ensure that they are marked as
being in the CPU domain since any external access is uncontrolled and we
must assume the worst. This means that we need to always flush the pages
on acquisition if we need to use them on the GPU, and from the beginning
have used set-domain. Set-domain is overkill for the purpose as it is a
general synchronisation barrier, but our intent is to only flush the
pages being swapped in. If we move that flush into the pages acquisition
phase, we know then that when we have obj->mm.pages, they are coherent
with the GPU and need only maintain that status without resorting to
heavy handed use of set-domain.
The principle knock-on effect for userspace is through mmap-gtt
pagefaulting. Our uAPI has always implied that the GTT mmap was async
(especially as when any pagefault occurs is unpredicatable to userspace)
and so userspace had to apply explicit domain control itself
(set-domain). However, swapping is transparent to the kernel, and so on
first fault we need to acquire the pages and make them coherent for
access through the GTT. Our use of set-domain here leaks into the uABI
that the first pagefault was synchronous. This is unintentional and
baring a few igt should be unoticed, nevertheless we bump the uABI
version for mmap-gtt to reflect the change in behaviour.
Another implication of the change is that gem_create() is presumed to
create an object that is coherent with the CPU and is in the CPU write
domain, so a set-domain(CPU) following a gem_create() would be a minor
operation that merely checked whether we could allocate all pages for
the object. On applying this change, a set-domain(CPU) causes a clflush
as we acquire the pages. This will have a small impact on mesa as we move
the clflush here on !llc from execbuf time to create, but that should
have minimal performance impact as the same clflush exists but is now
done early and because of the clflush issue, userspace recycles bo and
so should resist allocating fresh objects.
Internally, the presumption that objects are created in the CPU
write-domain and remain so through writes to obj->mm.mapping is more
prevalent than I expected; but easy enough to catch and apply a manual
flush.
For the future, we should push the page flush from the central
set_pages() into the callers so that we can more finely control when it
is applied, but for now doing it one location is easier to validate, at
the cost of sometimes flushing when there is no need.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-1-chris@chris-wilson.co.uk
2019-03-21 16:19:07 +00:00
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
|
|
|
|
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
|
drm/i915: Flush pages on acquisition
When we return pages to the system, we ensure that they are marked as
being in the CPU domain since any external access is uncontrolled and we
must assume the worst. This means that we need to always flush the pages
on acquisition if we need to use them on the GPU, and from the beginning
have used set-domain. Set-domain is overkill for the purpose as it is a
general synchronisation barrier, but our intent is to only flush the
pages being swapped in. If we move that flush into the pages acquisition
phase, we know then that when we have obj->mm.pages, they are coherent
with the GPU and need only maintain that status without resorting to
heavy handed use of set-domain.
The principle knock-on effect for userspace is through mmap-gtt
pagefaulting. Our uAPI has always implied that the GTT mmap was async
(especially as when any pagefault occurs is unpredicatable to userspace)
and so userspace had to apply explicit domain control itself
(set-domain). However, swapping is transparent to the kernel, and so on
first fault we need to acquire the pages and make them coherent for
access through the GTT. Our use of set-domain here leaks into the uABI
that the first pagefault was synchronous. This is unintentional and
baring a few igt should be unoticed, nevertheless we bump the uABI
version for mmap-gtt to reflect the change in behaviour.
Another implication of the change is that gem_create() is presumed to
create an object that is coherent with the CPU and is in the CPU write
domain, so a set-domain(CPU) following a gem_create() would be a minor
operation that merely checked whether we could allocate all pages for
the object. On applying this change, a set-domain(CPU) causes a clflush
as we acquire the pages. This will have a small impact on mesa as we move
the clflush here on !llc from execbuf time to create, but that should
have minimal performance impact as the same clflush exists but is now
done early and because of the clflush issue, userspace recycles bo and
so should resist allocating fresh objects.
Internally, the presumption that objects are created in the CPU
write-domain and remain so through writes to obj->mm.mapping is more
prevalent than I expected; but easy enough to catch and apply a manual
flush.
For the future, we should push the page flush from the central
set_pages() into the callers so that we can more finely control when it
is applied, but for now doing it one location is easier to validate, at
the cost of sometimes flushing when there is no need.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-1-chris@chris-wilson.co.uk
2019-03-21 16:19:07 +00:00
|
|
|
|
2020-12-24 15:13:58 +00:00
|
|
|
i915_gem_object_flush_map(obj);
|
2020-08-19 14:08:42 +00:00
|
|
|
i915_gem_object_unpin_map(obj);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
intel_gt_chipset_flush(cache->rq->engine->gt);
|
drm/i915/gem: Use chained reloc batches
The ring is a precious resource: we anticipate to only use a few hundred
bytes for a request, and only try to reserve that before we start. If we
go beyond our guess in building the request, then instead of waiting at
the start of execbuf before we hold any locks or other resources, we
may trigger a wait inside a critical region. One example is in using gpu
relocations, where currently we emit a new MI_BB_START from the ring
every time we overflow a page of relocation entries. However, instead of
insert the command into the precious ring, we can chain the next page of
relocation entries as MI_BB_START from the end of the previous.
v2: Delay the emit_bb_start until after all the chained vma
synchronisation is complete. Since the buffer pool batches are idle, this
_should_ be a no-op, but one day we may some fancy async GPU bindings
for new vma!
v3: Use pool/batch consitently, once we start thinking in terms of the
batch vma, use batch->obj.
v4: Explain the magic number 4.
Tvrtko spotted that we lose propagation of the error for failing to
submit the relocation request; that's easier to fix up in the next
patch.
Testcase: igt/gem_exec_reloc/basic-many-active
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200501192945.22215-1-chris@chris-wilson.co.uk
2020-05-01 19:29:43 +00:00
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
i915_request_add(cache->rq);
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_put_pool(eb, cache);
|
|
|
|
reloc_cache_clear(cache);
|
2020-05-01 19:29:44 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
eb->reloc_pool = NULL;
|
2017-06-16 14:05:24 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
|
2020-09-08 05:41:17 +00:00
|
|
|
{
|
|
|
|
void *vaddr;
|
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
if (cache->rq)
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_gpu_flush(eb, cache);
|
2020-08-19 14:08:42 +00:00
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
if (!cache->vaddr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vaddr = unmask_page(cache->vaddr);
|
|
|
|
if (cache->vaddr & KMAP) {
|
2020-08-19 14:08:46 +00:00
|
|
|
struct drm_i915_gem_object *obj =
|
|
|
|
(struct drm_i915_gem_object *)cache->node.mm;
|
2020-09-08 05:41:17 +00:00
|
|
|
if (cache->vaddr & CLFLUSH_AFTER)
|
|
|
|
mb();
|
|
|
|
|
|
|
|
kunmap_atomic(vaddr);
|
2020-08-19 14:08:46 +00:00
|
|
|
i915_gem_object_finish_access(obj);
|
2020-09-08 05:41:17 +00:00
|
|
|
} else {
|
|
|
|
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
|
|
|
|
|
|
|
|
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
|
|
|
|
io_mapping_unmap_atomic((void __iomem *)vaddr);
|
|
|
|
|
|
|
|
if (drm_mm_node_allocated(&cache->node)) {
|
|
|
|
ggtt->vm.clear_range(&ggtt->vm,
|
|
|
|
cache->node.start,
|
|
|
|
cache->node.size);
|
|
|
|
mutex_lock(&ggtt->vm.mutex);
|
|
|
|
drm_mm_remove_node(&cache->node);
|
|
|
|
mutex_unlock(&ggtt->vm.mutex);
|
|
|
|
} else {
|
|
|
|
i915_vma_unpin((struct i915_vma *)cache->node.mm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cache->vaddr = 0;
|
|
|
|
cache->page = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *reloc_kmap(struct drm_i915_gem_object *obj,
|
|
|
|
struct reloc_cache *cache,
|
2020-08-19 14:08:41 +00:00
|
|
|
unsigned long pageno)
|
2020-09-08 05:41:17 +00:00
|
|
|
{
|
|
|
|
void *vaddr;
|
2020-08-19 14:08:41 +00:00
|
|
|
struct page *page;
|
2020-09-08 05:41:17 +00:00
|
|
|
|
|
|
|
if (cache->vaddr) {
|
|
|
|
kunmap_atomic(unmask_page(cache->vaddr));
|
|
|
|
} else {
|
|
|
|
unsigned int flushes;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = i915_gem_object_prepare_write(obj, &flushes);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
|
|
|
|
BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
|
|
|
|
|
|
|
|
cache->vaddr = flushes | KMAP;
|
|
|
|
cache->node.mm = (void *)obj;
|
|
|
|
if (flushes)
|
|
|
|
mb();
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:41 +00:00
|
|
|
page = i915_gem_object_get_page(obj, pageno);
|
|
|
|
if (!obj->mm.dirty)
|
|
|
|
set_page_dirty(page);
|
|
|
|
|
|
|
|
vaddr = kmap_atomic(page);
|
2020-09-08 05:41:17 +00:00
|
|
|
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
|
2020-08-19 14:08:41 +00:00
|
|
|
cache->page = pageno;
|
2020-09-08 05:41:17 +00:00
|
|
|
|
|
|
|
return vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *reloc_iomap(struct drm_i915_gem_object *obj,
|
2020-08-19 14:08:54 +00:00
|
|
|
struct i915_execbuffer *eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
unsigned long page)
|
|
|
|
{
|
2020-08-19 14:08:54 +00:00
|
|
|
struct reloc_cache *cache = &eb->reloc_cache;
|
2020-09-08 05:41:17 +00:00
|
|
|
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
|
|
|
|
unsigned long offset;
|
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
if (cache->vaddr) {
|
|
|
|
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
|
|
|
|
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
|
|
|
|
} else {
|
|
|
|
struct i915_vma *vma;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (i915_gem_object_is_tiled(obj))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
if (use_cpu_reloc(cache, obj))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
err = i915_gem_object_set_to_gtt_domain(obj, true);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
|
|
|
|
PIN_MAPPABLE |
|
|
|
|
PIN_NONBLOCK /* NOWARN */ |
|
|
|
|
PIN_NOEVICT);
|
|
|
|
if (vma == ERR_PTR(-EDEADLK))
|
|
|
|
return vma;
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
if (IS_ERR(vma)) {
|
|
|
|
memset(&cache->node, 0, sizeof(cache->node));
|
|
|
|
mutex_lock(&ggtt->vm.mutex);
|
|
|
|
err = drm_mm_insert_node_in_range
|
|
|
|
(&ggtt->vm.mm, &cache->node,
|
|
|
|
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
|
|
|
|
0, ggtt->mappable_end,
|
|
|
|
DRM_MM_INSERT_LOW);
|
|
|
|
mutex_unlock(&ggtt->vm.mutex);
|
|
|
|
if (err) /* no inactive aperture space, use cpu reloc */
|
|
|
|
return NULL;
|
|
|
|
} else {
|
|
|
|
cache->node.start = vma->node.start;
|
|
|
|
cache->node.mm = (void *)vma;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
offset = cache->node.start;
|
|
|
|
if (drm_mm_node_allocated(&cache->node)) {
|
|
|
|
ggtt->vm.insert_page(&ggtt->vm,
|
|
|
|
i915_gem_object_get_dma_address(obj, page),
|
|
|
|
offset, I915_CACHE_NONE, 0);
|
|
|
|
} else {
|
|
|
|
offset += page << PAGE_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
|
|
|
|
offset);
|
|
|
|
cache->page = page;
|
|
|
|
cache->vaddr = (unsigned long)vaddr;
|
|
|
|
|
|
|
|
return vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
|
2020-08-19 14:08:54 +00:00
|
|
|
struct i915_execbuffer *eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
unsigned long page)
|
|
|
|
{
|
2020-08-19 14:08:54 +00:00
|
|
|
struct reloc_cache *cache = &eb->reloc_cache;
|
2020-09-08 05:41:17 +00:00
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
if (cache->page == page) {
|
|
|
|
vaddr = unmask_page(cache->vaddr);
|
|
|
|
} else {
|
|
|
|
vaddr = NULL;
|
|
|
|
if ((cache->vaddr & KMAP) == 0)
|
2020-08-19 14:08:54 +00:00
|
|
|
vaddr = reloc_iomap(obj, eb, page);
|
2020-09-08 05:41:17 +00:00
|
|
|
if (!vaddr)
|
|
|
|
vaddr = reloc_kmap(obj, cache, page);
|
|
|
|
}
|
|
|
|
|
|
|
|
return vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
|
|
|
|
{
|
|
|
|
if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
|
|
|
|
if (flushes & CLFLUSH_BEFORE) {
|
|
|
|
clflushopt(addr);
|
|
|
|
mb();
|
|
|
|
}
|
|
|
|
|
|
|
|
*addr = value;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Writes to the same cacheline are serialised by the CPU
|
|
|
|
* (including clflush). On the write path, we only require
|
|
|
|
* that it hits memory in an orderly fashion and place
|
|
|
|
* mb barriers at the start and end of the relocation phase
|
|
|
|
* to ensure ordering of clflush wrt to the system.
|
|
|
|
*/
|
|
|
|
if (flushes & CLFLUSH_AFTER)
|
|
|
|
clflushopt(addr);
|
|
|
|
} else
|
|
|
|
*addr = value;
|
|
|
|
}
|
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
|
|
|
int err;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
assert_vma_held(vma);
|
2019-05-28 09:29:51 +00:00
|
|
|
|
|
|
|
if (obj->cache_dirty & ~obj->cache_coherent)
|
|
|
|
i915_gem_clflush_object(obj, 0);
|
|
|
|
obj->write_domain = 0;
|
|
|
|
|
|
|
|
err = i915_request_await_object(rq, vma->obj, true);
|
|
|
|
if (err == 0)
|
|
|
|
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-06-16 14:05:24 +00:00
|
|
|
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
|
2020-05-01 19:29:45 +00:00
|
|
|
struct intel_engine_cs *engine,
|
2020-08-19 14:08:42 +00:00
|
|
|
struct i915_vma *vma,
|
2017-06-16 14:05:24 +00:00
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
struct reloc_cache *cache = &eb->reloc_cache;
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *pool = eb->reloc_pool;
|
2018-02-21 09:56:36 +00:00
|
|
|
struct i915_request *rq;
|
2017-06-16 14:05:24 +00:00
|
|
|
struct i915_vma *batch;
|
|
|
|
u32 *cmd;
|
|
|
|
int err;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (!pool) {
|
2021-01-19 13:31:06 +00:00
|
|
|
pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
|
|
|
|
cache->has_llc ?
|
|
|
|
I915_MAP_WB :
|
|
|
|
I915_MAP_WC);
|
2020-08-19 14:08:48 +00:00
|
|
|
if (IS_ERR(pool))
|
|
|
|
return PTR_ERR(pool);
|
|
|
|
}
|
|
|
|
eb->reloc_pool = NULL;
|
|
|
|
|
|
|
|
err = i915_gem_object_lock(pool->obj, &eb->ww);
|
|
|
|
if (err)
|
|
|
|
goto err_pool;
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2021-01-19 13:31:06 +00:00
|
|
|
cmd = i915_gem_object_pin_map(pool->obj, pool->type);
|
2019-08-04 12:48:26 +00:00
|
|
|
if (IS_ERR(cmd)) {
|
|
|
|
err = PTR_ERR(cmd);
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err_pool;
|
2019-08-04 12:48:26 +00:00
|
|
|
}
|
2021-03-23 15:50:18 +00:00
|
|
|
intel_gt_buffer_pool_mark_used(pool);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2020-12-24 15:13:58 +00:00
|
|
|
memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
|
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
|
2017-06-16 14:05:24 +00:00
|
|
|
if (IS_ERR(batch)) {
|
|
|
|
err = PTR_ERR(batch);
|
|
|
|
goto err_unmap;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
|
2017-06-16 14:05:24 +00:00
|
|
|
if (err)
|
|
|
|
goto err_unmap;
|
|
|
|
|
2020-05-01 19:29:45 +00:00
|
|
|
if (engine == eb->context->engine) {
|
|
|
|
rq = i915_request_create(eb->context);
|
|
|
|
} else {
|
2020-08-19 14:08:52 +00:00
|
|
|
struct intel_context *ce = eb->reloc_context;
|
2020-05-01 19:29:45 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (!ce) {
|
|
|
|
ce = intel_context_create(engine);
|
|
|
|
if (IS_ERR(ce)) {
|
|
|
|
err = PTR_ERR(ce);
|
|
|
|
goto err_unpin;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_vm_put(ce->vm);
|
|
|
|
ce->vm = i915_vm_get(eb->context->vm);
|
|
|
|
eb->reloc_context = ce;
|
2020-05-01 19:29:45 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err = intel_context_pin_ww(ce, &eb->ww);
|
2020-08-19 14:08:52 +00:00
|
|
|
if (err)
|
|
|
|
goto err_unpin;
|
2020-05-01 19:29:45 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
rq = i915_request_create(ce);
|
|
|
|
intel_context_unpin(ce);
|
2020-05-01 19:29:45 +00:00
|
|
|
}
|
2017-06-16 14:05:24 +00:00
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
goto err_unpin;
|
|
|
|
}
|
|
|
|
|
2020-04-30 11:18:12 +00:00
|
|
|
err = intel_gt_buffer_pool_mark_active(pool, rq);
|
2019-08-04 12:48:26 +00:00
|
|
|
if (err)
|
|
|
|
goto err_request;
|
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
err = reloc_move_to_gpu(rq, vma);
|
|
|
|
if (err)
|
|
|
|
goto err_request;
|
|
|
|
|
|
|
|
err = eb->engine->emit_bb_start(rq,
|
|
|
|
batch->node.start, PAGE_SIZE,
|
2021-04-13 05:09:59 +00:00
|
|
|
cache->graphics_ver > 5 ? 0 : I915_DISPATCH_SECURE);
|
2020-08-19 14:08:42 +00:00
|
|
|
if (err)
|
|
|
|
goto skip_request;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
assert_vma_held(batch);
|
2019-08-19 11:20:33 +00:00
|
|
|
err = i915_request_await_object(rq, batch->obj, false);
|
|
|
|
if (err == 0)
|
|
|
|
err = i915_vma_move_to_active(batch, rq, 0);
|
2018-07-06 10:39:44 +00:00
|
|
|
if (err)
|
|
|
|
goto skip_request;
|
2017-06-16 14:05:24 +00:00
|
|
|
|
|
|
|
rq->batch = batch;
|
2018-07-06 10:39:44 +00:00
|
|
|
i915_vma_unpin(batch);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
|
|
|
cache->rq = rq;
|
|
|
|
cache->rq_cmd = cmd;
|
|
|
|
cache->rq_size = 0;
|
2020-08-19 14:08:48 +00:00
|
|
|
cache->pool = pool;
|
2017-06-16 14:05:24 +00:00
|
|
|
|
|
|
|
/* Return with batch mapping (cmd) still pinned */
|
2020-08-19 14:08:48 +00:00
|
|
|
return 0;
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2018-07-06 10:39:44 +00:00
|
|
|
skip_request:
|
2020-03-04 12:18:48 +00:00
|
|
|
i915_request_set_error_once(rq, err);
|
2017-06-16 14:05:24 +00:00
|
|
|
err_request:
|
2018-02-21 09:56:36 +00:00
|
|
|
i915_request_add(rq);
|
2017-06-16 14:05:24 +00:00
|
|
|
err_unpin:
|
|
|
|
i915_vma_unpin(batch);
|
|
|
|
err_unmap:
|
2019-08-04 12:48:26 +00:00
|
|
|
i915_gem_object_unpin_map(pool->obj);
|
2020-08-19 14:08:48 +00:00
|
|
|
err_pool:
|
|
|
|
eb->reloc_pool = pool;
|
2017-06-16 14:05:24 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
|
|
|
|
{
|
2021-06-05 15:53:54 +00:00
|
|
|
return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6;
|
2020-05-04 14:06:29 +00:00
|
|
|
}
|
|
|
|
|
2017-06-16 14:05:24 +00:00
|
|
|
static u32 *reloc_gpu(struct i915_execbuffer *eb,
|
|
|
|
struct i915_vma *vma,
|
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
struct reloc_cache *cache = &eb->reloc_cache;
|
|
|
|
u32 *cmd;
|
2020-08-19 14:08:42 +00:00
|
|
|
|
|
|
|
if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_gpu_flush(eb, cache);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
|
|
|
if (unlikely(!cache->rq)) {
|
2020-08-19 14:08:42 +00:00
|
|
|
int err;
|
2020-05-01 19:29:45 +00:00
|
|
|
struct intel_engine_cs *engine = eb->engine;
|
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
if (!reloc_can_use_engine(engine)) {
|
2020-05-01 19:29:45 +00:00
|
|
|
engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
|
2020-05-04 14:06:29 +00:00
|
|
|
if (!engine)
|
2020-05-01 19:29:45 +00:00
|
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
}
|
2017-09-06 15:28:59 +00:00
|
|
|
|
2020-08-19 14:08:42 +00:00
|
|
|
err = __reloc_gpu_alloc(eb, engine, vma, len);
|
2017-06-16 14:05:24 +00:00
|
|
|
if (unlikely(err))
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd = cache->rq_cmd + cache->rq_size;
|
|
|
|
cache->rq_size += len;
|
|
|
|
|
|
|
|
return cmd;
|
|
|
|
}
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
static inline bool use_reloc_gpu(struct i915_vma *vma)
|
|
|
|
{
|
|
|
|
if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (DBG_FORCE_RELOC)
|
|
|
|
return false;
|
|
|
|
|
2021-06-02 11:01:15 +00:00
|
|
|
return !dma_resv_test_signaled(vma->resv, true);
|
2020-09-08 05:41:17 +00:00
|
|
|
}
|
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
|
drm/i915: Fallback to using CPU relocations for large batch buffers
If the batch buffer is too large to fit into the aperture and we need a
GTT mapping for relocations, we currently fail. This only applies to a
subset of machines for a subset of environments, quite undesirable. We
can simply check after failing to insert the batch into the GTT as to
whether we only need a mappable binding for relocation and, if so, we can
revert to using a non-mappable binding and an alternate relocation
method. However, using relocate_entry_cpu() is excruciatingly slow for
large buffers on non-LLC as the entire buffer requires clflushing before
and after the relocation handling. Alternatively, we can implement a
third relocation method that only clflushes around the relocation entry.
This is still slower than updating through the GTT, so we prefer using
the GTT where possible, but is orders of magnitude faster as we
typically do not have to then clflush the entire buffer.
An alternative idea of using a temporary WC mapping of the backing store
is promising (it should be faster than using the GTT itself), but
requires fairly extensive arch/x86 support - along the lines of
kmap_atomic_prof_pfn() (which is not universally implemented even for
x86).
Testcase: igt/gem_exec_big #pnv,byt
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88392
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add a WARN_ONCE for the impossible reloc case and explain in
a short comment why we want to avoid ping-pong.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2015-01-14 11:20:56 +00:00
|
|
|
{
|
2020-05-04 14:06:29 +00:00
|
|
|
struct page *page;
|
|
|
|
unsigned long addr;
|
drm/i915: Fallback to using CPU relocations for large batch buffers
If the batch buffer is too large to fit into the aperture and we need a
GTT mapping for relocations, we currently fail. This only applies to a
subset of machines for a subset of environments, quite undesirable. We
can simply check after failing to insert the batch into the GTT as to
whether we only need a mappable binding for relocation and, if so, we can
revert to using a non-mappable binding and an alternate relocation
method. However, using relocate_entry_cpu() is excruciatingly slow for
large buffers on non-LLC as the entire buffer requires clflushing before
and after the relocation handling. Alternatively, we can implement a
third relocation method that only clflushes around the relocation entry.
This is still slower than updating through the GTT, so we prefer using
the GTT where possible, but is orders of magnitude faster as we
typically do not have to then clflush the entire buffer.
An alternative idea of using a temporary WC mapping of the backing store
is promising (it should be faster than using the GTT itself), but
requires fairly extensive arch/x86 support - along the lines of
kmap_atomic_prof_pfn() (which is not universally implemented even for
x86).
Testcase: igt/gem_exec_big #pnv,byt
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88392
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add a WARN_ONCE for the impossible reloc case and explain in
a short comment why we want to avoid ping-pong.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2015-01-14 11:20:56 +00:00
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
|
|
|
|
addr = PFN_PHYS(page_to_pfn(page));
|
|
|
|
GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
|
2017-06-16 14:05:24 +00:00
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
return addr + offset_in_page(offset);
|
|
|
|
}
|
|
|
|
|
2020-09-10 11:12:25 +00:00
|
|
|
static int __reloc_entry_gpu(struct i915_execbuffer *eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
struct i915_vma *vma,
|
|
|
|
u64 offset,
|
|
|
|
u64 target_addr)
|
2020-05-04 14:06:29 +00:00
|
|
|
{
|
2021-04-13 05:09:59 +00:00
|
|
|
const unsigned int ver = eb->reloc_cache.graphics_ver;
|
2020-05-04 14:06:29 +00:00
|
|
|
unsigned int len;
|
|
|
|
u32 *batch;
|
|
|
|
u64 addr;
|
|
|
|
|
2021-04-13 05:09:59 +00:00
|
|
|
if (ver >= 8)
|
2020-05-04 14:06:29 +00:00
|
|
|
len = offset & 7 ? 8 : 5;
|
2021-04-13 05:09:59 +00:00
|
|
|
else if (ver >= 4)
|
2020-05-04 14:06:29 +00:00
|
|
|
len = 4;
|
|
|
|
else
|
|
|
|
len = 3;
|
|
|
|
|
|
|
|
batch = reloc_gpu(eb, vma, len);
|
2020-08-19 14:08:48 +00:00
|
|
|
if (batch == ERR_PTR(-EDEADLK))
|
2020-09-10 11:12:25 +00:00
|
|
|
return -EDEADLK;
|
2020-08-19 14:08:48 +00:00
|
|
|
else if (IS_ERR(batch))
|
2020-09-08 05:41:17 +00:00
|
|
|
return false;
|
2020-05-04 14:06:29 +00:00
|
|
|
|
|
|
|
addr = gen8_canonical_addr(vma->node.start + offset);
|
2021-04-13 05:09:59 +00:00
|
|
|
if (ver >= 8) {
|
2020-05-04 14:06:29 +00:00
|
|
|
if (offset & 7) {
|
|
|
|
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
|
|
|
*batch++ = lower_32_bits(addr);
|
|
|
|
*batch++ = upper_32_bits(addr);
|
|
|
|
*batch++ = lower_32_bits(target_addr);
|
|
|
|
|
|
|
|
addr = gen8_canonical_addr(addr + 4);
|
2017-06-16 14:05:24 +00:00
|
|
|
|
|
|
|
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
2020-05-04 14:06:29 +00:00
|
|
|
*batch++ = lower_32_bits(addr);
|
|
|
|
*batch++ = upper_32_bits(addr);
|
|
|
|
*batch++ = upper_32_bits(target_addr);
|
2017-06-16 14:05:24 +00:00
|
|
|
} else {
|
2020-05-04 14:06:29 +00:00
|
|
|
*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
|
|
|
|
*batch++ = lower_32_bits(addr);
|
|
|
|
*batch++ = upper_32_bits(addr);
|
|
|
|
*batch++ = lower_32_bits(target_addr);
|
|
|
|
*batch++ = upper_32_bits(target_addr);
|
2017-06-16 14:05:24 +00:00
|
|
|
}
|
2021-04-13 05:09:59 +00:00
|
|
|
} else if (ver >= 6) {
|
2020-05-04 14:06:29 +00:00
|
|
|
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
|
|
|
*batch++ = 0;
|
|
|
|
*batch++ = addr;
|
|
|
|
*batch++ = target_addr;
|
|
|
|
} else if (IS_I965G(eb->i915)) {
|
|
|
|
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
|
|
|
*batch++ = 0;
|
|
|
|
*batch++ = vma_phys_addr(vma, offset);
|
|
|
|
*batch++ = target_addr;
|
2021-04-13 05:09:59 +00:00
|
|
|
} else if (ver >= 4) {
|
2020-05-04 14:06:29 +00:00
|
|
|
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
|
|
|
*batch++ = 0;
|
|
|
|
*batch++ = addr;
|
|
|
|
*batch++ = target_addr;
|
2021-04-13 05:09:59 +00:00
|
|
|
} else if (ver >= 3 &&
|
2020-05-04 14:06:29 +00:00
|
|
|
!(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
|
|
|
|
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
|
|
|
|
*batch++ = addr;
|
|
|
|
*batch++ = target_addr;
|
|
|
|
} else {
|
|
|
|
*batch++ = MI_STORE_DWORD_IMM;
|
|
|
|
*batch++ = vma_phys_addr(vma, offset);
|
|
|
|
*batch++ = target_addr;
|
2017-06-16 14:05:24 +00:00
|
|
|
}
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
static int reloc_entry_gpu(struct i915_execbuffer *eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
struct i915_vma *vma,
|
|
|
|
u64 offset,
|
|
|
|
u64 target_addr)
|
|
|
|
{
|
|
|
|
if (eb->reloc_cache.vaddr)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!use_reloc_gpu(vma))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return __reloc_entry_gpu(eb, vma, offset, target_addr);
|
2020-05-04 14:06:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static u64
|
2020-09-08 05:41:17 +00:00
|
|
|
relocate_entry(struct i915_vma *vma,
|
2020-05-04 14:06:29 +00:00
|
|
|
const struct drm_i915_gem_relocation_entry *reloc,
|
2020-09-08 05:41:17 +00:00
|
|
|
struct i915_execbuffer *eb,
|
2020-05-04 14:06:29 +00:00
|
|
|
const struct i915_vma *target)
|
|
|
|
{
|
|
|
|
u64 target_addr = relocation_target(reloc, target);
|
2020-09-08 05:41:17 +00:00
|
|
|
u64 offset = reloc->offset;
|
2020-08-19 14:08:48 +00:00
|
|
|
int reloc_gpu = reloc_entry_gpu(eb, vma, offset, target_addr);
|
|
|
|
|
|
|
|
if (reloc_gpu < 0)
|
|
|
|
return reloc_gpu;
|
2020-09-08 05:41:17 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (!reloc_gpu) {
|
2020-09-08 05:41:17 +00:00
|
|
|
bool wide = eb->reloc_cache.use_64bit_reloc;
|
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
repeat:
|
2020-08-19 14:08:54 +00:00
|
|
|
vaddr = reloc_vaddr(vma->obj, eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
offset >> PAGE_SHIFT);
|
|
|
|
if (IS_ERR(vaddr))
|
|
|
|
return PTR_ERR(vaddr);
|
|
|
|
|
|
|
|
GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
|
|
|
|
clflush_write32(vaddr + offset_in_page(offset),
|
|
|
|
lower_32_bits(target_addr),
|
|
|
|
eb->reloc_cache.vaddr);
|
|
|
|
|
|
|
|
if (wide) {
|
|
|
|
offset += sizeof(u32);
|
|
|
|
target_addr >>= 32;
|
|
|
|
wide = false;
|
|
|
|
goto repeat;
|
|
|
|
}
|
|
|
|
}
|
drm/i915: Fallback to using CPU relocations for large batch buffers
If the batch buffer is too large to fit into the aperture and we need a
GTT mapping for relocations, we currently fail. This only applies to a
subset of machines for a subset of environments, quite undesirable. We
can simply check after failing to insert the batch into the GTT as to
whether we only need a mappable binding for relocation and, if so, we can
revert to using a non-mappable binding and an alternate relocation
method. However, using relocate_entry_cpu() is excruciatingly slow for
large buffers on non-LLC as the entire buffer requires clflushing before
and after the relocation handling. Alternatively, we can implement a
third relocation method that only clflushes around the relocation entry.
This is still slower than updating through the GTT, so we prefer using
the GTT where possible, but is orders of magnitude faster as we
typically do not have to then clflush the entire buffer.
An alternative idea of using a temporary WC mapping of the backing store
is promising (it should be faster than using the GTT itself), but
requires fairly extensive arch/x86 support - along the lines of
kmap_atomic_prof_pfn() (which is not universally implemented even for
x86).
Testcase: igt/gem_exec_big #pnv,byt
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88392
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add a WARN_ONCE for the impossible reloc case and explain in
a short comment why we want to avoid ping-pong.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2015-01-14 11:20:56 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return target->node.start | UPDATE;
|
drm/i915: Fallback to using CPU relocations for large batch buffers
If the batch buffer is too large to fit into the aperture and we need a
GTT mapping for relocations, we currently fail. This only applies to a
subset of machines for a subset of environments, quite undesirable. We
can simply check after failing to insert the batch into the GTT as to
whether we only need a mappable binding for relocation and, if so, we can
revert to using a non-mappable binding and an alternate relocation
method. However, using relocate_entry_cpu() is excruciatingly slow for
large buffers on non-LLC as the entire buffer requires clflushing before
and after the relocation handling. Alternatively, we can implement a
third relocation method that only clflushes around the relocation entry.
This is still slower than updating through the GTT, so we prefer using
the GTT where possible, but is orders of magnitude faster as we
typically do not have to then clflush the entire buffer.
An alternative idea of using a temporary WC mapping of the backing store
is promising (it should be faster than using the GTT itself), but
requires fairly extensive arch/x86 support - along the lines of
kmap_atomic_prof_pfn() (which is not universally implemented even for
x86).
Testcase: igt/gem_exec_big #pnv,byt
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88392
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Add a WARN_ONCE for the impossible reloc case and explain in
a short comment why we want to avoid ping-pong.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2015-01-14 11:20:56 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static u64
|
|
|
|
eb_relocate_entry(struct i915_execbuffer *eb,
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const struct drm_i915_gem_relocation_entry *reloc)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *target;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2010-12-08 10:38:14 +00:00
|
|
|
/* we've already hold a reference to all valid objects */
|
2017-06-16 14:05:17 +00:00
|
|
|
target = eb_get_vma(eb, reloc->target_handle);
|
|
|
|
if (unlikely(!target))
|
2010-11-25 18:00:26 +00:00
|
|
|
return -ENOENT;
|
2012-07-31 22:35:01 +00:00
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* Validate that the target is in a valid r/w GPU domain */
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "reloc with multiple write domains: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d "
|
2010-11-25 18:00:26 +00:00
|
|
|
"read %08x write %08x",
|
2017-06-16 14:05:17 +00:00
|
|
|
reloc->target_handle,
|
2010-11-25 18:00:26 +00:00
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2011-12-14 12:57:27 +00:00
|
|
|
if (unlikely((reloc->write_domain | reloc->read_domains)
|
|
|
|
& ~I915_GEM_GPU_DOMAINS)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d "
|
2010-11-25 18:00:26 +00:00
|
|
|
"read %08x write %08x",
|
2017-06-16 14:05:17 +00:00
|
|
|
reloc->target_handle,
|
2010-11-25 18:00:26 +00:00
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (reloc->write_domain) {
|
2020-03-03 20:43:44 +00:00
|
|
|
target->flags |= EXEC_OBJECT_WRITE;
|
2017-06-16 14:05:17 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* Sandybridge PPGTT errata: We need a global gtt mapping
|
|
|
|
* for MI and pipe_control writes because the gpu doesn't
|
|
|
|
* properly redirect them through the ppgtt for non_secure
|
|
|
|
* batchbuffers.
|
|
|
|
*/
|
|
|
|
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
|
2021-06-05 15:53:54 +00:00
|
|
|
GRAPHICS_VER(eb->i915) == 6) {
|
2020-03-03 20:43:44 +00:00
|
|
|
err = i915_vma_bind(target->vma,
|
|
|
|
target->vma->obj->cache_level,
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
PIN_GLOBAL, NULL);
|
2020-05-25 14:19:57 +00:00
|
|
|
if (err)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
|
|
|
}
|
2017-06-16 14:05:17 +00:00
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* If the relocation already has the right value in it, no
|
2010-11-25 18:00:26 +00:00
|
|
|
* more work needs to be done.
|
|
|
|
*/
|
2020-09-08 05:41:17 +00:00
|
|
|
if (!DBG_FORCE_RELOC &&
|
|
|
|
gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
|
2010-12-08 10:38:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
|
|
|
/* Check that the relocation address is valid... */
|
2013-11-03 04:07:11 +00:00
|
|
|
if (unlikely(reloc->offset >
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "Relocation beyond object bounds: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d size %d.\n",
|
|
|
|
reloc->target_handle,
|
|
|
|
(int)reloc->offset,
|
2020-03-03 20:43:44 +00:00
|
|
|
(int)ev->vma->size);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->offset & 3)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d.\n",
|
|
|
|
reloc->target_handle,
|
|
|
|
(int)reloc->offset);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2017-06-16 14:05:18 +00:00
|
|
|
/*
|
|
|
|
* If we write into the object, we need to force the synchronisation
|
|
|
|
* barrier, either with an asynchronous clflush or if we executed the
|
|
|
|
* patching using the GPU (though that should be serialised by the
|
|
|
|
* timeline). To be completely sure, and since we are required to
|
|
|
|
* do relocations we are already stalling, disable the user's opt
|
2017-08-16 08:52:09 +00:00
|
|
|
* out of our synchronisation.
|
2017-06-16 14:05:18 +00:00
|
|
|
*/
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->flags &= ~EXEC_OBJECT_ASYNC;
|
2017-06-16 14:05:18 +00:00
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* and update the user's relocation entry */
|
2020-09-08 05:41:17 +00:00
|
|
|
return relocate_entry(ev->vma, reloc, eb, target->vma);
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2012-03-24 20:12:53 +00:00
|
|
|
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
|
2020-03-03 20:43:44 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
2020-04-07 08:59:30 +00:00
|
|
|
struct drm_i915_gem_relocation_entry __user *urelocs =
|
|
|
|
u64_to_user_ptr(entry->relocs_ptr);
|
|
|
|
unsigned long remain = entry->relocation_count;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-04-07 08:59:30 +00:00
|
|
|
if (unlikely(remain > N_RELOC(ULONG_MAX)))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EINVAL;
|
2016-10-18 12:02:51 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* We must check that the entire relocation array is safe
|
|
|
|
* to read. However, if the array is not writable the user loses
|
|
|
|
* the updated relocation values.
|
|
|
|
*/
|
2020-04-07 08:59:30 +00:00
|
|
|
if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
do {
|
|
|
|
struct drm_i915_gem_relocation_entry *r = stack;
|
|
|
|
unsigned int count =
|
2020-04-07 08:59:30 +00:00
|
|
|
min_t(unsigned long, remain, ARRAY_SIZE(stack));
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
unsigned int copied;
|
2012-03-24 20:12:53 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* This is the fast path and we cannot handle a pagefault
|
2016-10-18 12:02:51 +00:00
|
|
|
* whilst holding the struct mutex lest the user pass in the
|
|
|
|
* relocations contained within a mmaped bo. For in such a case
|
|
|
|
* we, the page fault handler would call i915_gem_fault() and
|
|
|
|
* we would try to acquire the struct mutex again. Obviously
|
|
|
|
* this is bad and so lockdep complains vehemently.
|
|
|
|
*/
|
2020-08-19 14:08:43 +00:00
|
|
|
pagefault_disable();
|
|
|
|
copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
|
|
|
|
pagefault_enable();
|
2020-09-08 05:41:17 +00:00
|
|
|
if (unlikely(copied)) {
|
|
|
|
remain = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
remain -= count;
|
2012-03-24 20:12:53 +00:00
|
|
|
do {
|
2020-03-03 20:43:44 +00:00
|
|
|
u64 offset = eb_relocate_entry(eb, ev, r);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (likely(offset == 0)) {
|
|
|
|
} else if ((s64)offset < 0) {
|
2020-09-08 05:41:17 +00:00
|
|
|
remain = (int)offset;
|
|
|
|
goto out;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Note that reporting an error now
|
|
|
|
* leaves everything in an inconsistent
|
|
|
|
* state as we have *already* changed
|
|
|
|
* the relocation value inside the
|
|
|
|
* object. As we have not changed the
|
|
|
|
* reloc.presumed_offset or will not
|
|
|
|
* change the execobject.offset, on the
|
|
|
|
* call we may not rewrite the value
|
|
|
|
* inside the object, leaving it
|
|
|
|
* dangling and causing a GPU hang. Unless
|
|
|
|
* userspace dynamically rebuilds the
|
|
|
|
* relocations on each execbuf rather than
|
|
|
|
* presume a static tree.
|
|
|
|
*
|
|
|
|
* We did previously check if the relocations
|
|
|
|
* were writable (access_ok), an error now
|
|
|
|
* would be a strange race with mprotect,
|
|
|
|
* having already demonstrated that we
|
|
|
|
* can read from this userspace address.
|
|
|
|
*/
|
|
|
|
offset = gen8_canonical_addr(offset & ~UPDATE);
|
2020-03-31 16:21:50 +00:00
|
|
|
__put_user(offset,
|
|
|
|
&urelocs[r - stack].presumed_offset);
|
2012-03-24 20:12:53 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
} while (r++, --count);
|
|
|
|
urelocs += ARRAY_SIZE(stack);
|
|
|
|
} while (remain);
|
2020-09-08 05:41:17 +00:00
|
|
|
out:
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_reset(&eb->reloc_cache, eb);
|
2020-09-08 05:41:17 +00:00
|
|
|
return remain;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int
|
|
|
|
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2020-08-19 14:08:43 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
|
|
|
struct drm_i915_gem_relocation_entry *relocs =
|
|
|
|
u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
|
|
|
|
unsigned int i;
|
2020-03-03 20:43:45 +00:00
|
|
|
int err;
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
for (i = 0; i < entry->relocation_count; i++) {
|
|
|
|
u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
|
2020-03-03 20:43:45 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if ((s64)offset < 0) {
|
|
|
|
err = (int)offset;
|
|
|
|
goto err;
|
|
|
|
}
|
2020-03-06 07:16:14 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
err = 0;
|
|
|
|
err:
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_reset(&eb->reloc_cache, eb);
|
2020-08-19 14:08:43 +00:00
|
|
|
return err;
|
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
|
|
|
|
{
|
|
|
|
const char __user *addr, *end;
|
|
|
|
unsigned long size;
|
|
|
|
char __maybe_unused c;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
size = entry->relocation_count;
|
|
|
|
if (size == 0)
|
|
|
|
return 0;
|
2020-05-01 19:29:44 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (size > N_RELOC(ULONG_MAX))
|
|
|
|
return -EINVAL;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
addr = u64_to_user_ptr(entry->relocs_ptr);
|
|
|
|
size *= sizeof(struct drm_i915_gem_relocation_entry);
|
|
|
|
if (!access_ok(addr, size))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
end = addr + size;
|
|
|
|
for (; addr < end; addr += PAGE_SIZE) {
|
|
|
|
int err = __get_user(c, addr);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return __get_user(c, end - 1);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int eb_copy_relocations(const struct i915_execbuffer *eb)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
2020-08-19 14:08:43 +00:00
|
|
|
struct drm_i915_gem_relocation_entry *relocs;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
2020-08-19 14:08:43 +00:00
|
|
|
int err;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
for (i = 0; i < count; i++) {
|
2020-08-19 14:08:43 +00:00
|
|
|
const unsigned int nreloc = eb->exec[i].relocation_count;
|
|
|
|
struct drm_i915_gem_relocation_entry __user *urelocs;
|
|
|
|
unsigned long size;
|
|
|
|
unsigned long copied;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (nreloc == 0)
|
|
|
|
continue;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
err = check_relocations(&eb->exec[i]);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
|
|
|
|
size = nreloc * sizeof(*relocs);
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
relocs = kvmalloc_array(size, 1, GFP_KERNEL);
|
|
|
|
if (!relocs) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err;
|
2019-05-28 09:29:51 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
|
|
|
|
/* copy_from_user is limited to < 4GiB */
|
|
|
|
copied = 0;
|
|
|
|
do {
|
|
|
|
unsigned int len =
|
|
|
|
min_t(u64, BIT_ULL(31), size - copied);
|
|
|
|
|
|
|
|
if (__copy_from_user((char *)relocs + copied,
|
|
|
|
(char __user *)urelocs + copied,
|
|
|
|
len))
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
copied += len;
|
|
|
|
} while (copied < size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* As we do not update the known relocation offsets after
|
|
|
|
* relocating (due to the complexities in lock handling),
|
|
|
|
* we need to mark them as invalid now so that we force the
|
|
|
|
* relocation processing next time. Just in case the target
|
|
|
|
* object is evicted and then rebound into its old
|
|
|
|
* presumed_offset before the next execbuffer - if that
|
|
|
|
* happened we would make the mistake of assuming that the
|
|
|
|
* relocations were valid.
|
|
|
|
*/
|
|
|
|
if (!user_access_begin(urelocs, size))
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
for (copied = 0; copied < nreloc; copied++)
|
|
|
|
unsafe_put_user(-1,
|
|
|
|
&urelocs[copied].presumed_offset,
|
|
|
|
end_user);
|
|
|
|
user_access_end();
|
|
|
|
|
|
|
|
eb->exec[i].relocs_ptr = (uintptr_t)relocs;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
end_user:
|
|
|
|
user_access_end();
|
|
|
|
end:
|
|
|
|
kvfree(relocs);
|
|
|
|
err = -EFAULT;
|
|
|
|
err:
|
|
|
|
while (i--) {
|
|
|
|
relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
|
|
|
|
if (eb->exec[i].relocation_count)
|
|
|
|
kvfree(relocs);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_prefault_relocations(const struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = check_relocations(&eb->exec[i]);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
static int eb_reinit_userptr(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (likely(!(eb->args->flags & __EXEC_USERPTR_USED)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
|
|
|
|
if (!i915_gem_object_is_userptr(ev->vma->obj))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = i915_gem_object_userptr_submit_init(ev->vma->obj);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ev->flags |= __EXEC_OBJECT_USERPTR_INIT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
|
|
|
|
struct i915_request *rq)
|
2020-08-19 14:08:43 +00:00
|
|
|
{
|
|
|
|
bool have_copy = false;
|
|
|
|
struct eb_vma *ev;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
repeat:
|
|
|
|
if (signal_pending(current)) {
|
|
|
|
err = -ERESTARTSYS;
|
|
|
|
goto out;
|
2019-05-28 09:29:51 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
/* We may process another execbuffer during the unlock... */
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
eb_release_vmas(eb, false, true);
|
2020-08-19 14:08:48 +00:00
|
|
|
i915_gem_ww_ctx_fini(&eb->ww);
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (rq) {
|
|
|
|
/* nonblocking is always false */
|
|
|
|
if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
|
|
|
|
MAX_SCHEDULE_TIMEOUT) < 0) {
|
|
|
|
i915_request_put(rq);
|
|
|
|
rq = NULL;
|
|
|
|
|
|
|
|
err = -EINTR;
|
|
|
|
goto err_relock;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_request_put(rq);
|
|
|
|
rq = NULL;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
/*
|
|
|
|
* We take 3 passes through the slowpatch.
|
|
|
|
*
|
|
|
|
* 1 - we try to just prefault all the user relocation entries and
|
|
|
|
* then attempt to reuse the atomic pagefault disabled fast path again.
|
|
|
|
*
|
|
|
|
* 2 - we copy the user entries to a local buffer here outside of the
|
|
|
|
* local and allow ourselves to wait upon any rendering before
|
|
|
|
* relocations
|
|
|
|
*
|
|
|
|
* 3 - we already have a local copy of the relocation entries, but
|
|
|
|
* were interrupted (EAGAIN) whilst waiting for the objects, try again.
|
|
|
|
*/
|
|
|
|
if (!err) {
|
|
|
|
err = eb_prefault_relocations(eb);
|
|
|
|
} else if (!have_copy) {
|
|
|
|
err = eb_copy_relocations(eb);
|
|
|
|
have_copy = err == 0;
|
|
|
|
} else {
|
|
|
|
cond_resched();
|
|
|
|
err = 0;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (!err)
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
err = eb_reinit_userptr(eb);
|
2020-08-19 14:08:43 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
err_relock:
|
2020-08-19 14:08:48 +00:00
|
|
|
i915_gem_ww_ctx_init(&eb->ww, true);
|
2020-08-19 14:08:43 +00:00
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
/* reacquire the objects */
|
|
|
|
repeat_validate:
|
2020-08-19 14:08:52 +00:00
|
|
|
rq = eb_pin_engine(eb, false);
|
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
2020-08-19 14:08:54 +00:00
|
|
|
rq = NULL;
|
2020-08-19 14:08:52 +00:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We didn't throttle, should be NULL */
|
|
|
|
GEM_WARN_ON(rq);
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = eb_validate_vmas(eb);
|
2020-08-19 14:08:43 +00:00
|
|
|
if (err)
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
GEM_BUG_ON(!eb->batch);
|
2020-08-19 14:08:43 +00:00
|
|
|
|
|
|
|
list_for_each_entry(ev, &eb->relocs, reloc_link) {
|
|
|
|
if (!have_copy) {
|
|
|
|
pagefault_disable();
|
|
|
|
err = eb_relocate_vma(eb, ev);
|
|
|
|
pagefault_enable();
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
err = eb_relocate_vma_slow(eb, ev);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (err == -EDEADLK)
|
|
|
|
goto err;
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (err && !have_copy)
|
|
|
|
goto repeat;
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
/* as last step, parse the command buffer */
|
|
|
|
err = eb_parse(eb);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
/*
|
|
|
|
* Leave the user relocations as are, this is the painfully slow path,
|
|
|
|
* and we want to avoid the complication of dropping the lock whilst
|
|
|
|
* having buffers reserved in the aperture and so causing spurious
|
|
|
|
* ENOSPC for random operations.
|
|
|
|
*/
|
|
|
|
|
|
|
|
err:
|
2020-08-19 14:08:48 +00:00
|
|
|
if (err == -EDEADLK) {
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
eb_release_vmas(eb, false, false);
|
2020-08-19 14:08:48 +00:00
|
|
|
err = i915_gem_ww_ctx_backoff(&eb->ww);
|
|
|
|
if (!err)
|
|
|
|
goto repeat_validate;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (err == -EAGAIN)
|
|
|
|
goto repeat;
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (have_copy) {
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
const struct drm_i915_gem_exec_object2 *entry =
|
|
|
|
&eb->exec[i];
|
|
|
|
struct drm_i915_gem_relocation_entry *relocs;
|
|
|
|
|
|
|
|
if (!entry->relocation_count)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
|
|
|
|
kvfree(relocs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (rq)
|
|
|
|
i915_request_put(rq);
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
static int eb_relocate_parse(struct i915_execbuffer *eb)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2020-03-03 20:43:45 +00:00
|
|
|
int err;
|
2020-08-19 14:08:52 +00:00
|
|
|
struct i915_request *rq = NULL;
|
|
|
|
bool throttle = true;
|
2020-03-03 20:43:45 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
retry:
|
2020-08-19 14:08:52 +00:00
|
|
|
rq = eb_pin_engine(eb, throttle);
|
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
rq = NULL;
|
|
|
|
if (err != -EDEADLK)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rq) {
|
|
|
|
bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
|
|
|
|
|
|
|
|
/* Need to drop all locks now for throttling, take slowpath */
|
|
|
|
err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
|
|
|
|
if (err == -ETIME) {
|
|
|
|
if (nonblock) {
|
|
|
|
err = -EWOULDBLOCK;
|
|
|
|
i915_request_put(rq);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
goto slow;
|
|
|
|
}
|
|
|
|
i915_request_put(rq);
|
|
|
|
rq = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* only throttle once, even if we didn't need to throttle */
|
|
|
|
throttle = false;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = eb_validate_vmas(eb);
|
|
|
|
if (err == -EAGAIN)
|
|
|
|
goto slow;
|
|
|
|
else if (err)
|
|
|
|
goto err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/* The objects are in their final locations, apply the relocations. */
|
|
|
|
if (eb->args->flags & __EXEC_HAS_RELOC) {
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
list_for_each_entry(ev, &eb->relocs, reloc_link) {
|
2020-03-11 16:03:10 +00:00
|
|
|
err = eb_relocate_vma(eb, ev);
|
|
|
|
if (err)
|
2020-08-19 14:08:43 +00:00
|
|
|
break;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (err == -EDEADLK)
|
|
|
|
goto err;
|
|
|
|
else if (err)
|
|
|
|
goto slow;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!err)
|
|
|
|
err = eb_parse(eb);
|
|
|
|
|
|
|
|
err:
|
|
|
|
if (err == -EDEADLK) {
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
eb_release_vmas(eb, false, false);
|
2020-08-19 14:08:48 +00:00
|
|
|
err = i915_gem_ww_ctx_backoff(&eb->ww);
|
|
|
|
if (!err)
|
|
|
|
goto retry;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
return err;
|
|
|
|
|
|
|
|
slow:
|
2020-08-19 14:08:52 +00:00
|
|
|
err = eb_relocate_parse_slow(eb, rq);
|
2020-08-19 14:08:48 +00:00
|
|
|
if (err)
|
|
|
|
/*
|
|
|
|
* If the user expects the execobject.offset and
|
|
|
|
* reloc.presumed_offset to be an exact match,
|
|
|
|
* as for using NO_RELOC, then we cannot update
|
|
|
|
* the execobject.offset until we have completed
|
|
|
|
* relocation.
|
|
|
|
*/
|
|
|
|
eb->args->flags &= ~__EXEC_HAS_RELOC;
|
|
|
|
|
|
|
|
return err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_move_to_gpu(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
2020-08-19 14:08:48 +00:00
|
|
|
unsigned int i = count;
|
2019-05-28 09:29:51 +00:00
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
while (i--) {
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
unsigned int flags = ev->flags;
|
drm/i915: Convert execbuf code to use vmas
In order to transition more of our code over to using a VMA instead of
an <OBJ, VM> pair - we must have the vma accessible at execbuf time. Up
until now, we've only had a VMA when actually binding an object.
The previous patch helped handle the distinction on bound vs. unbound.
This patch will help us catch leaks, and other issues before we actually
shuffle a bunch of stuff around.
This attempts to convert all the execbuf code to speak in vmas. Since
the execbuf code is very self contained it was a nice isolated
conversion.
The meat of the code is about turning eb_objects into eb_vma, and then
wiring up the rest of the code to use vmas instead of obj, vm pairs.
Unfortunately, to do this, we must move the exec_list link from the obj
structure. This list is reused in the eviction code, so we must also
modify the eviction code to make this work.
WARNING: This patch makes an already hotly profiled path slower. The cost is
unavoidable. In reply to this mail, I will attach the extra data.
v2: Release table lock early, and two a 2 phase vma lookup to avoid
having to use a GFP_ATOMIC. (Chris)
v3: s/obj_exec_list/obj_exec_link/
Updates to address
commit 6d2b888569d366beb4be72cacfde41adee2c25e1
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed Aug 7 18:30:54 2013 +0100
drm/i915: List objects allocated from stolen memory in debugfs
v4: Use obj = vma->obj for neatness in some places (Chris)
need_reloc_mappable() should return false if ppgtt (Chris)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Split out prep patches. Also remove a FIXME comment which is
now taken care of.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-14 09:38:36 +00:00
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
2015-04-27 12:41:18 +00:00
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
assert_vma_held(vma);
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & EXEC_OBJECT_CAPTURE) {
|
2018-02-21 09:56:36 +00:00
|
|
|
struct i915_capture_list *capture;
|
2017-04-15 09:39:02 +00:00
|
|
|
|
|
|
|
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
|
2019-05-28 09:29:51 +00:00
|
|
|
if (capture) {
|
|
|
|
capture->next = eb->request->capture_list;
|
|
|
|
capture->vma = vma;
|
|
|
|
eb->request->capture_list = capture;
|
|
|
|
}
|
2017-04-15 09:39:02 +00:00
|
|
|
}
|
|
|
|
|
2017-08-11 11:11:16 +00:00
|
|
|
/*
|
|
|
|
* If the GPU is not _reading_ through the CPU cache, we need
|
|
|
|
* to make sure that any writes (both previous GPU writes from
|
|
|
|
* before a change in snooping levels and normal CPU writes)
|
|
|
|
* caught in that cache are flushed to main memory.
|
|
|
|
*
|
|
|
|
* We want to say
|
|
|
|
* obj->cache_dirty &&
|
|
|
|
* !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
|
|
|
|
* but gcc's optimiser doesn't handle that as well and emits
|
|
|
|
* two jumps instead of one. Maybe one day...
|
|
|
|
*/
|
|
|
|
if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
|
2017-07-21 14:50:37 +00:00
|
|
|
if (i915_gem_clflush_object(obj, 0))
|
2017-08-16 08:52:06 +00:00
|
|
|
flags &= ~EXEC_OBJECT_ASYNC;
|
2017-07-21 14:50:37 +00:00
|
|
|
}
|
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
|
|
|
|
err = i915_request_await_object
|
|
|
|
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
|
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
if (err == 0)
|
2021-03-23 15:49:59 +00:00
|
|
|
err = i915_vma_move_to_active(vma, eb->request,
|
|
|
|
flags | __EXEC_OBJECT_NO_RESERVE);
|
2011-03-06 13:51:29 +00:00
|
|
|
}
|
2020-03-30 13:37:10 +00:00
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#ifdef CONFIG_MMU_NOTIFIER
|
|
|
|
if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
|
|
|
|
spin_lock(&eb->i915->mm.notifier_lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* count is always at least 1, otherwise __EXEC_USERPTR_USED
|
|
|
|
* could not have been set
|
|
|
|
*/
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct drm_i915_gem_object *obj = ev->vma->obj;
|
|
|
|
|
|
|
|
if (!i915_gem_object_is_userptr(obj))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
err = i915_gem_object_userptr_submit_done(obj);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(&eb->i915->mm.notifier_lock);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
if (unlikely(err))
|
|
|
|
goto err_skip;
|
|
|
|
|
2016-08-18 16:16:40 +00:00
|
|
|
/* Unconditionally flush any chipset caches (for streaming writes). */
|
2019-06-21 07:08:02 +00:00
|
|
|
intel_gt_chipset_flush(eb->engine->gt);
|
2017-11-20 10:20:01 +00:00
|
|
|
return 0;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
|
|
|
err_skip:
|
2020-03-04 12:18:48 +00:00
|
|
|
i915_request_set_error_once(eb->request, err);
|
2019-05-28 09:29:51 +00:00
|
|
|
return err;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2019-12-09 12:23:14 +00:00
|
|
|
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2017-06-15 08:14:33 +00:00
|
|
|
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2013-01-17 21:23:36 +00:00
|
|
|
|
2015-10-06 10:39:55 +00:00
|
|
|
/* Kernel clipping was a DRI1 misfeature */
|
2020-08-04 08:59:53 +00:00
|
|
|
if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
|
|
|
|
I915_EXEC_USE_EXTENSIONS))) {
|
2017-08-15 14:57:33 +00:00
|
|
|
if (exec->num_cliprects || exec->cliprects_ptr)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
2015-10-06 10:39:55 +00:00
|
|
|
|
|
|
|
if (exec->DR4 == 0xffffffff) {
|
|
|
|
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
|
|
|
exec->DR4 = 0;
|
|
|
|
}
|
|
|
|
if (exec->DR1 || exec->DR4)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2015-10-06 10:39:55 +00:00
|
|
|
|
|
|
|
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2015-10-06 10:39:55 +00:00
|
|
|
|
2019-12-09 12:23:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
|
2012-01-03 17:23:29 +00:00
|
|
|
{
|
2017-02-14 11:32:42 +00:00
|
|
|
u32 *cs;
|
|
|
|
int i;
|
2012-01-03 17:23:29 +00:00
|
|
|
|
2021-06-05 15:53:54 +00:00
|
|
|
if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
|
2020-06-02 22:09:53 +00:00
|
|
|
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
|
2014-04-24 06:09:09 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-01-03 17:23:29 +00:00
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
cs = intel_ring_begin(rq, 4 * 2 + 2);
|
2017-02-14 11:32:42 +00:00
|
|
|
if (IS_ERR(cs))
|
|
|
|
return PTR_ERR(cs);
|
2012-01-03 17:23:29 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*cs++ = MI_LOAD_REGISTER_IMM(4);
|
2012-01-03 17:23:29 +00:00
|
|
|
for (i = 0; i < 4; i++) {
|
2017-02-14 11:32:42 +00:00
|
|
|
*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
|
|
|
|
*cs++ = 0;
|
2012-01-03 17:23:29 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*cs++ = MI_NOOP;
|
2018-02-21 09:56:36 +00:00
|
|
|
intel_ring_advance(rq, cs);
|
2012-01-03 17:23:29 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-22 20:59:06 +00:00
|
|
|
static struct i915_vma *
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow_batch_pin(struct i915_execbuffer *eb,
|
|
|
|
struct drm_i915_gem_object *obj,
|
2019-12-11 23:08:56 +00:00
|
|
|
struct i915_address_space *vm,
|
|
|
|
unsigned int flags)
|
2018-05-22 20:59:06 +00:00
|
|
|
{
|
2019-11-15 17:08:35 +00:00
|
|
|
struct i915_vma *vma;
|
|
|
|
int err;
|
2018-05-22 20:59:06 +00:00
|
|
|
|
2019-11-15 17:08:35 +00:00
|
|
|
vma = i915_vma_instance(obj, vm, NULL);
|
|
|
|
if (IS_ERR(vma))
|
|
|
|
return vma;
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
|
2019-11-15 17:08:35 +00:00
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
return vma;
|
2018-05-22 20:59:06 +00:00
|
|
|
}
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
struct eb_parse_work {
|
|
|
|
struct dma_fence_work base;
|
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
struct i915_vma *batch;
|
|
|
|
struct i915_vma *shadow;
|
|
|
|
struct i915_vma *trampoline;
|
2020-09-28 21:59:42 +00:00
|
|
|
unsigned long batch_offset;
|
|
|
|
unsigned long batch_length;
|
2021-03-23 15:49:52 +00:00
|
|
|
unsigned long *jump_whitelist;
|
|
|
|
const void *batch_map;
|
|
|
|
void *shadow_map;
|
2019-12-11 23:08:57 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int __eb_parse(struct dma_fence_work *work)
|
|
|
|
{
|
|
|
|
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
|
2021-03-23 15:49:52 +00:00
|
|
|
int ret;
|
|
|
|
bool cookie;
|
|
|
|
|
|
|
|
cookie = dma_fence_begin_signalling();
|
|
|
|
ret = intel_engine_cmd_parser(pw->engine,
|
|
|
|
pw->batch,
|
|
|
|
pw->batch_offset,
|
|
|
|
pw->batch_length,
|
|
|
|
pw->shadow,
|
|
|
|
pw->jump_whitelist,
|
|
|
|
pw->shadow_map,
|
|
|
|
pw->batch_map);
|
|
|
|
dma_fence_end_signalling(cookie);
|
|
|
|
|
|
|
|
return ret;
|
2019-12-11 23:08:57 +00:00
|
|
|
}
|
|
|
|
|
2020-01-13 15:45:55 +00:00
|
|
|
static void __eb_parse_release(struct dma_fence_work *work)
|
|
|
|
{
|
|
|
|
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
|
|
|
|
|
2021-03-23 15:49:52 +00:00
|
|
|
if (!IS_ERR_OR_NULL(pw->jump_whitelist))
|
|
|
|
kfree(pw->jump_whitelist);
|
|
|
|
|
|
|
|
if (pw->batch_map)
|
|
|
|
i915_gem_object_unpin_map(pw->batch->obj);
|
|
|
|
else
|
|
|
|
i915_gem_object_unpin_pages(pw->batch->obj);
|
|
|
|
|
|
|
|
i915_gem_object_unpin_map(pw->shadow->obj);
|
|
|
|
|
2020-01-13 15:45:55 +00:00
|
|
|
if (pw->trampoline)
|
|
|
|
i915_active_release(&pw->trampoline->active);
|
|
|
|
i915_active_release(&pw->shadow->active);
|
|
|
|
i915_active_release(&pw->batch->active);
|
|
|
|
}
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
static const struct dma_fence_work_ops eb_parse_ops = {
|
|
|
|
.name = "eb_parse",
|
|
|
|
.work = __eb_parse,
|
2020-01-13 15:45:55 +00:00
|
|
|
.release = __eb_parse_release,
|
2019-12-11 23:08:57 +00:00
|
|
|
};
|
|
|
|
|
2020-06-04 10:37:30 +00:00
|
|
|
static inline int
|
|
|
|
__parser_mark_active(struct i915_vma *vma,
|
|
|
|
struct intel_timeline *tl,
|
|
|
|
struct dma_fence *fence)
|
|
|
|
{
|
|
|
|
struct intel_gt_buffer_pool_node *node = vma->private;
|
|
|
|
|
2020-07-31 08:50:11 +00:00
|
|
|
return i915_active_ref(&node->active, tl->fence_context, fence);
|
2020-06-04 10:37:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mutex_lock(&tl->mutex);
|
|
|
|
|
|
|
|
err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
|
|
|
|
if (err)
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
if (pw->trampoline) {
|
|
|
|
err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
|
|
|
|
if (err)
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
mutex_unlock(&tl->mutex);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
static int eb_parse_pipeline(struct i915_execbuffer *eb,
|
|
|
|
struct i915_vma *shadow,
|
|
|
|
struct i915_vma *trampoline)
|
|
|
|
{
|
|
|
|
struct eb_parse_work *pw;
|
2021-03-23 15:49:52 +00:00
|
|
|
struct drm_i915_gem_object *batch = eb->batch->vma->obj;
|
|
|
|
bool needs_clflush;
|
2019-12-11 23:08:57 +00:00
|
|
|
int err;
|
|
|
|
|
2020-09-28 21:59:42 +00:00
|
|
|
GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
|
|
|
|
GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
pw = kzalloc(sizeof(*pw), GFP_KERNEL);
|
|
|
|
if (!pw)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
err = i915_active_acquire(&eb->batch->vma->active);
|
2020-01-13 15:45:55 +00:00
|
|
|
if (err)
|
|
|
|
goto err_free;
|
|
|
|
|
|
|
|
err = i915_active_acquire(&shadow->active);
|
|
|
|
if (err)
|
|
|
|
goto err_batch;
|
|
|
|
|
|
|
|
if (trampoline) {
|
|
|
|
err = i915_active_acquire(&trampoline->active);
|
|
|
|
if (err)
|
|
|
|
goto err_shadow;
|
|
|
|
}
|
|
|
|
|
2021-03-23 15:49:52 +00:00
|
|
|
pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
|
|
|
|
if (IS_ERR(pw->shadow_map)) {
|
|
|
|
err = PTR_ERR(pw->shadow_map);
|
|
|
|
goto err_trampoline;
|
|
|
|
}
|
|
|
|
|
|
|
|
needs_clflush =
|
|
|
|
!(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
|
|
|
|
|
|
|
|
pw->batch_map = ERR_PTR(-ENODEV);
|
|
|
|
if (needs_clflush && i915_has_memcpy_from_wc())
|
|
|
|
pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
|
|
|
|
|
|
|
|
if (IS_ERR(pw->batch_map)) {
|
|
|
|
err = i915_gem_object_pin_pages(batch);
|
|
|
|
if (err)
|
|
|
|
goto err_unmap_shadow;
|
|
|
|
pw->batch_map = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
pw->jump_whitelist =
|
|
|
|
intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
|
|
|
|
trampoline);
|
|
|
|
if (IS_ERR(pw->jump_whitelist)) {
|
|
|
|
err = PTR_ERR(pw->jump_whitelist);
|
|
|
|
goto err_unmap_batch;
|
|
|
|
}
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
dma_fence_work_init(&pw->base, &eb_parse_ops);
|
|
|
|
|
|
|
|
pw->engine = eb->engine;
|
2020-03-03 20:43:44 +00:00
|
|
|
pw->batch = eb->batch->vma;
|
2019-12-11 23:08:57 +00:00
|
|
|
pw->batch_offset = eb->batch_start_offset;
|
|
|
|
pw->batch_length = eb->batch_len;
|
|
|
|
pw->shadow = shadow;
|
|
|
|
pw->trampoline = trampoline;
|
|
|
|
|
2020-06-04 10:37:30 +00:00
|
|
|
/* Mark active refs early for this worker, in case we get interrupted */
|
|
|
|
err = parser_mark_active(pw, eb->context->timeline);
|
|
|
|
if (err)
|
|
|
|
goto err_commit;
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
err = dma_resv_reserve_shared(pw->batch->resv, 1);
|
|
|
|
if (err)
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err_commit;
|
2019-12-11 23:08:57 +00:00
|
|
|
|
2021-03-23 15:49:59 +00:00
|
|
|
err = dma_resv_reserve_shared(shadow->resv, 1);
|
|
|
|
if (err)
|
|
|
|
goto err_commit;
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
/* Wait for all writes (and relocs) into the batch to complete */
|
|
|
|
err = i915_sw_fence_await_reservation(&pw->base.chain,
|
|
|
|
pw->batch->resv, NULL, false,
|
|
|
|
0, I915_FENCE_GFP);
|
|
|
|
if (err < 0)
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err_commit;
|
2019-12-11 23:08:57 +00:00
|
|
|
|
|
|
|
/* Keep the batch alive and unwritten as we parse */
|
|
|
|
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
|
|
|
|
|
|
|
|
/* Force execution to wait for completion of the parser */
|
|
|
|
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
|
|
|
|
|
2020-03-25 12:02:27 +00:00
|
|
|
dma_fence_work_commit_imm(&pw->base);
|
2019-12-11 23:08:57 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-06-04 10:37:30 +00:00
|
|
|
err_commit:
|
|
|
|
i915_sw_fence_set_error_once(&pw->base.chain, err);
|
|
|
|
dma_fence_work_commit_imm(&pw->base);
|
|
|
|
return err;
|
|
|
|
|
2021-03-23 15:49:52 +00:00
|
|
|
err_unmap_batch:
|
|
|
|
if (pw->batch_map)
|
|
|
|
i915_gem_object_unpin_map(batch);
|
|
|
|
else
|
|
|
|
i915_gem_object_unpin_pages(batch);
|
|
|
|
err_unmap_shadow:
|
|
|
|
i915_gem_object_unpin_map(shadow->obj);
|
|
|
|
err_trampoline:
|
|
|
|
if (trampoline)
|
|
|
|
i915_active_release(&trampoline->active);
|
2020-01-13 15:45:55 +00:00
|
|
|
err_shadow:
|
|
|
|
i915_active_release(&shadow->active);
|
|
|
|
err_batch:
|
2020-03-03 20:43:44 +00:00
|
|
|
i915_active_release(&eb->batch->vma->active);
|
2020-01-13 15:45:55 +00:00
|
|
|
err_free:
|
2019-12-11 23:08:57 +00:00
|
|
|
kfree(pw);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
|
|
|
|
* batch" bit. Hence we need to pin secure batches into the global gtt.
|
|
|
|
* hsw should have this fixed, but bdw mucks it up again. */
|
|
|
|
if (eb->batch_flags & I915_DISPATCH_SECURE)
|
|
|
|
return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-12-11 11:04:36 +00:00
|
|
|
static int eb_parse(struct i915_execbuffer *eb)
|
2014-12-11 20:13:12 +00:00
|
|
|
{
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
|
2020-08-19 14:08:54 +00:00
|
|
|
struct i915_vma *shadow, *trampoline, *batch;
|
2020-10-15 11:59:54 +00:00
|
|
|
unsigned long len;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
if (!eb_use_cmdparser(eb)) {
|
|
|
|
batch = eb_dispatch_secure(eb, eb->batch->vma);
|
|
|
|
if (IS_ERR(batch))
|
|
|
|
return PTR_ERR(batch);
|
|
|
|
|
|
|
|
goto secure_batch;
|
|
|
|
}
|
2019-12-11 11:04:36 +00:00
|
|
|
|
2019-12-11 23:08:56 +00:00
|
|
|
len = eb->batch_len;
|
|
|
|
if (!CMDPARSER_USES_GGTT(eb->i915)) {
|
|
|
|
/*
|
|
|
|
* ppGTT backed shadow buffers must be mapped RO, to prevent
|
|
|
|
* post-scan tampering
|
|
|
|
*/
|
|
|
|
if (!eb->context->vm->has_read_only) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"Cannot prevent post-scan tampering without RO capable vm\n");
|
2019-12-11 23:08:56 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
|
|
|
|
}
|
2020-10-15 11:59:54 +00:00
|
|
|
if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */
|
|
|
|
return -EINVAL;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (!pool) {
|
2021-01-19 13:31:06 +00:00
|
|
|
pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
|
|
|
|
I915_MAP_WB);
|
2020-08-19 14:08:48 +00:00
|
|
|
if (IS_ERR(pool))
|
|
|
|
return PTR_ERR(pool);
|
|
|
|
eb->batch_pool = pool;
|
|
|
|
}
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = i915_gem_object_lock(pool->obj, &eb->ww);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
|
2019-12-11 23:08:56 +00:00
|
|
|
if (IS_ERR(shadow)) {
|
|
|
|
err = PTR_ERR(shadow);
|
drm/i915/cmdparser: Add support for backward jumps
To keep things manageable, the pre-gen9 cmdparser does not
attempt to track any form of nested BB_START's. This did not
prevent usermode from using nested starts, or even chained
batches because the cmdparser is not strictly enforced pre gen9.
Instead, the existence of a nested BB_START would cause the batch
to be emitted in insecure mode, and any privileged capabilities
would not be available.
For Gen9, the cmdparser becomes mandatory (for BCS at least), and
so not providing any form of nested BB_START support becomes
overly restrictive. Any such batch will simply not run.
We make heavy use of backward jumps in igt, and it is much easier
to add support for this restricted subset of nested jumps, than to
rewrite the whole of our test suite to avoid them.
Add the required logic to support limited backward jumps, to
instructions that have already been validated by the parser.
Note that it's not sufficient to simply approve any BB_START
that jumps backwards in the buffer because this would allow an
attacker to embed a rogue instruction sequence within the
operand words of a harmless instruction (say LRI) and jump to
that.
We introduce a bit array to track every instr offset successfully
validated, and test the target of BB_START against this. If the
target offset hits, it is re-written to the same offset in the
shadow buffer and the BB_START cmd is allowed.
Note: This patch deliberately ignores checkpatch issues in the
cmdtables, in order to match the style of the surrounding code.
We'll correct the entire file in one go in a later patch.
v2: set dispatch secure late (Mika)
v3: rebase (Mika)
v4: Clear whitelist on each parse
Minor review updates (Chris)
v5: Correct backward jump batching
v6: fix compilation error due to struct eb shuffle (Mika)
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
2018-09-20 16:58:36 +00:00
|
|
|
goto err;
|
2019-12-11 11:04:36 +00:00
|
|
|
}
|
2021-03-23 15:50:18 +00:00
|
|
|
intel_gt_buffer_pool_mark_used(pool);
|
2019-12-11 23:08:56 +00:00
|
|
|
i915_gem_object_set_readonly(shadow->obj);
|
2020-06-04 10:37:30 +00:00
|
|
|
shadow->private = pool;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
|
|
|
trampoline = NULL;
|
|
|
|
if (CMDPARSER_USES_GGTT(eb->i915)) {
|
|
|
|
trampoline = shadow;
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow = shadow_batch_pin(eb, pool->obj,
|
2019-12-11 23:08:56 +00:00
|
|
|
&eb->engine->gt->ggtt->vm,
|
|
|
|
PIN_GLOBAL);
|
|
|
|
if (IS_ERR(shadow)) {
|
|
|
|
err = PTR_ERR(shadow);
|
|
|
|
shadow = trampoline;
|
|
|
|
goto err_shadow;
|
|
|
|
}
|
2020-06-04 10:37:30 +00:00
|
|
|
shadow->private = pool;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
|
|
|
eb->batch_flags |= I915_DISPATCH_SECURE;
|
|
|
|
}
|
drm/i915/cmdparser: Add support for backward jumps
To keep things manageable, the pre-gen9 cmdparser does not
attempt to track any form of nested BB_START's. This did not
prevent usermode from using nested starts, or even chained
batches because the cmdparser is not strictly enforced pre gen9.
Instead, the existence of a nested BB_START would cause the batch
to be emitted in insecure mode, and any privileged capabilities
would not be available.
For Gen9, the cmdparser becomes mandatory (for BCS at least), and
so not providing any form of nested BB_START support becomes
overly restrictive. Any such batch will simply not run.
We make heavy use of backward jumps in igt, and it is much easier
to add support for this restricted subset of nested jumps, than to
rewrite the whole of our test suite to avoid them.
Add the required logic to support limited backward jumps, to
instructions that have already been validated by the parser.
Note that it's not sufficient to simply approve any BB_START
that jumps backwards in the buffer because this would allow an
attacker to embed a rogue instruction sequence within the
operand words of a harmless instruction (say LRI) and jump to
that.
We introduce a bit array to track every instr offset successfully
validated, and test the target of BB_START against this. If the
target offset hits, it is re-written to the same offset in the
shadow buffer and the BB_START cmd is allowed.
Note: This patch deliberately ignores checkpatch issues in the
cmdtables, in order to match the style of the surrounding code.
We'll correct the entire file in one go in a later patch.
v2: set dispatch secure late (Mika)
v3: rebase (Mika)
v4: Clear whitelist on each parse
Minor review updates (Chris)
v5: Correct backward jump batching
v6: fix compilation error due to struct eb shuffle (Mika)
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
2018-09-20 16:58:36 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
batch = eb_dispatch_secure(eb, shadow);
|
|
|
|
if (IS_ERR(batch)) {
|
|
|
|
err = PTR_ERR(batch);
|
|
|
|
goto err_trampoline;
|
|
|
|
}
|
|
|
|
|
2019-12-11 23:08:57 +00:00
|
|
|
err = eb_parse_pipeline(eb, shadow, trampoline);
|
2019-12-11 23:08:56 +00:00
|
|
|
if (err)
|
2020-08-19 14:08:54 +00:00
|
|
|
goto err_unpin_batch;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
eb->batch = &eb->vma[eb->buffer_count++];
|
2020-08-19 14:08:54 +00:00
|
|
|
eb->batch->vma = i915_vma_get(shadow);
|
|
|
|
eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2019-12-11 23:08:56 +00:00
|
|
|
eb->trampoline = trampoline;
|
2018-05-22 20:59:06 +00:00
|
|
|
eb->batch_start_offset = 0;
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
secure_batch:
|
|
|
|
if (batch) {
|
|
|
|
eb->batch = &eb->vma[eb->buffer_count++];
|
|
|
|
eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
|
|
|
|
eb->batch->vma = i915_vma_get(batch);
|
|
|
|
}
|
2019-12-11 11:04:36 +00:00
|
|
|
return 0;
|
2019-08-04 12:48:26 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err_unpin_batch:
|
|
|
|
if (batch)
|
|
|
|
i915_vma_unpin(batch);
|
2019-12-11 23:08:56 +00:00
|
|
|
err_trampoline:
|
|
|
|
if (trampoline)
|
|
|
|
i915_vma_unpin(trampoline);
|
|
|
|
err_shadow:
|
|
|
|
i915_vma_unpin(shadow);
|
2019-08-04 12:48:26 +00:00
|
|
|
err:
|
2019-12-11 11:04:36 +00:00
|
|
|
return err;
|
2014-12-11 20:13:12 +00:00
|
|
|
}
|
2014-09-06 09:28:27 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
|
2014-07-03 15:28:05 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2014-07-03 15:28:05 +00:00
|
|
|
|
2021-01-08 20:40:26 +00:00
|
|
|
if (intel_context_nopreempt(eb->context))
|
|
|
|
__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = eb_move_to_gpu(eb);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2014-07-03 15:28:05 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = i915_reset_gen7_sol_offsets(eb->request);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2014-07-03 15:28:05 +00:00
|
|
|
}
|
|
|
|
|
2019-01-29 18:54:50 +00:00
|
|
|
/*
|
|
|
|
* After we completed waiting for other engines (using HW semaphores)
|
|
|
|
* then we can signal that this request/batch is ready to run. This
|
|
|
|
* allows us to determine if the batch is still waiting on the GPU
|
|
|
|
* or actually running by checking the breadcrumb.
|
|
|
|
*/
|
|
|
|
if (eb->engine->emit_init_breadcrumb) {
|
|
|
|
err = eb->engine->emit_init_breadcrumb(eb->request);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = eb->engine->emit_bb_start(eb->request,
|
2020-03-03 20:43:44 +00:00
|
|
|
batch->node.start +
|
2017-06-15 08:14:33 +00:00
|
|
|
eb->batch_start_offset,
|
|
|
|
eb->batch_len,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->batch_flags);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2014-07-03 15:28:05 +00:00
|
|
|
|
2019-12-11 23:08:56 +00:00
|
|
|
if (eb->trampoline) {
|
|
|
|
GEM_BUG_ON(eb->batch_start_offset);
|
|
|
|
err = eb->engine->emit_bb_start(eb->request,
|
|
|
|
eb->trampoline->node.start +
|
|
|
|
eb->batch_len,
|
|
|
|
0, 0);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-10-06 10:39:55 +00:00
|
|
|
return 0;
|
2014-07-03 15:28:05 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 12:31:53 +00:00
|
|
|
static int num_vcs_engines(const struct drm_i915_private *i915)
|
|
|
|
{
|
2021-01-03 13:51:44 +00:00
|
|
|
return hweight_long(VDBOX_MASK(&i915->gt));
|
2019-08-09 12:31:53 +00:00
|
|
|
}
|
|
|
|
|
2018-02-08 11:39:17 +00:00
|
|
|
/*
|
2014-04-17 02:37:40 +00:00
|
|
|
* Find one BSD ring to dispatch the corresponding BSD command.
|
2016-07-27 08:07:27 +00:00
|
|
|
* The engine index is returned.
|
2014-04-17 02:37:40 +00:00
|
|
|
*/
|
2016-01-15 15:12:50 +00:00
|
|
|
static unsigned int
|
2016-07-27 08:07:27 +00:00
|
|
|
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
|
|
|
|
struct drm_file *file)
|
2014-04-17 02:37:40 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_file_private *file_priv = file->driver_priv;
|
|
|
|
|
2016-01-15 15:12:50 +00:00
|
|
|
/* Check whether the file_priv has already selected one ring. */
|
2016-09-01 11:58:21 +00:00
|
|
|
if ((int)file_priv->bsd_engine < 0)
|
2019-08-09 09:10:10 +00:00
|
|
|
file_priv->bsd_engine =
|
|
|
|
get_random_int() % num_vcs_engines(dev_priv);
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
|
2016-07-27 08:07:27 +00:00
|
|
|
return file_priv->bsd_engine;
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
static const enum intel_engine_id user_ring_map[] = {
|
2019-03-05 18:03:30 +00:00
|
|
|
[I915_EXEC_DEFAULT] = RCS0,
|
|
|
|
[I915_EXEC_RENDER] = RCS0,
|
|
|
|
[I915_EXEC_BLT] = BCS0,
|
|
|
|
[I915_EXEC_BSD] = VCS0,
|
|
|
|
[I915_EXEC_VEBOX] = VECS0
|
2016-01-15 15:12:50 +00:00
|
|
|
};
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
|
2019-08-15 20:57:09 +00:00
|
|
|
{
|
|
|
|
struct intel_ring *ring = ce->ring;
|
|
|
|
struct intel_timeline *tl = ce->timeline;
|
|
|
|
struct i915_request *rq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Completely unscientific finger-in-the-air estimates for suitable
|
|
|
|
* maximum user request size (to avoid blocking) and then backoff.
|
|
|
|
*/
|
|
|
|
if (intel_ring_update_space(ring) >= PAGE_SIZE)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a request that after waiting upon, there will be at least half
|
|
|
|
* the ring available. The hysteresis allows us to compete for the
|
|
|
|
* shared ring and should mean that we sleep less often prior to
|
|
|
|
* claiming our resources, but not so long that the ring completely
|
|
|
|
* drains before we can submit our next request.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(rq, &tl->requests, link) {
|
|
|
|
if (rq->ring != ring)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (__intel_ring_space(rq->postfix,
|
|
|
|
ring->emit, ring->size) > ring->size / 2)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (&rq->link == &tl->requests)
|
|
|
|
return NULL; /* weird, we will check again later for real */
|
|
|
|
|
|
|
|
return i915_request_get(rq);
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
|
2019-08-15 20:57:09 +00:00
|
|
|
{
|
2020-08-19 14:08:52 +00:00
|
|
|
struct intel_context *ce = eb->context;
|
2019-08-15 20:57:09 +00:00
|
|
|
struct intel_timeline *tl;
|
2020-08-19 14:08:52 +00:00
|
|
|
struct i915_request *rq = NULL;
|
2019-08-15 20:57:09 +00:00
|
|
|
int err;
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
|
2019-04-25 05:01:43 +00:00
|
|
|
|
2019-12-20 10:12:29 +00:00
|
|
|
if (unlikely(intel_context_is_banned(ce)))
|
2020-08-19 14:08:52 +00:00
|
|
|
return ERR_PTR(-EIO);
|
2019-12-20 10:12:29 +00:00
|
|
|
|
2019-04-25 05:01:43 +00:00
|
|
|
/*
|
|
|
|
* Pinning the contexts may generate requests in order to acquire
|
|
|
|
* GGTT space, so do this first before we reserve a seqno for
|
|
|
|
* ourselves.
|
|
|
|
*/
|
2020-08-19 14:08:54 +00:00
|
|
|
err = intel_context_pin_ww(ce, &eb->ww);
|
2019-04-26 16:33:29 +00:00
|
|
|
if (err)
|
2020-08-19 14:08:52 +00:00
|
|
|
return ERR_PTR(err);
|
2019-04-25 05:01:43 +00:00
|
|
|
|
2019-08-04 12:48:25 +00:00
|
|
|
/*
|
|
|
|
* Take a local wakeref for preparing to dispatch the execbuf as
|
|
|
|
* we expect to access the hardware fairly frequently in the
|
|
|
|
* process, and require the engine to be kept awake between accesses.
|
|
|
|
* Upon dispatch, we acquire another prolonged wakeref that we hold
|
|
|
|
* until the timeline is idle, which in turn releases the wakeref
|
|
|
|
* taken on the engine, and the parent device.
|
|
|
|
*/
|
2019-08-15 20:57:09 +00:00
|
|
|
tl = intel_context_timeline_lock(ce);
|
|
|
|
if (IS_ERR(tl)) {
|
2020-08-19 14:08:52 +00:00
|
|
|
intel_context_unpin(ce);
|
|
|
|
return ERR_CAST(tl);
|
2019-08-15 20:57:09 +00:00
|
|
|
}
|
2019-08-04 12:48:25 +00:00
|
|
|
|
|
|
|
intel_context_enter(ce);
|
2020-08-19 14:08:52 +00:00
|
|
|
if (throttle)
|
|
|
|
rq = eb_throttle(eb, ce);
|
2019-08-15 20:57:09 +00:00
|
|
|
intel_context_timeline_unlock(tl);
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
eb->args->flags |= __EXEC_ENGINE_PINNED;
|
|
|
|
return rq;
|
2019-04-25 05:01:43 +00:00
|
|
|
}
|
|
|
|
|
2019-08-15 20:57:09 +00:00
|
|
|
static void eb_unpin_engine(struct i915_execbuffer *eb)
|
2019-04-25 05:01:43 +00:00
|
|
|
{
|
2019-08-04 12:48:25 +00:00
|
|
|
struct intel_context *ce = eb->context;
|
2019-08-09 18:25:18 +00:00
|
|
|
struct intel_timeline *tl = ce->timeline;
|
2019-08-04 12:48:25 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
|
|
|
|
return;
|
|
|
|
|
|
|
|
eb->args->flags &= ~__EXEC_ENGINE_PINNED;
|
|
|
|
|
2019-08-04 12:48:25 +00:00
|
|
|
mutex_lock(&tl->mutex);
|
|
|
|
intel_context_exit(ce);
|
|
|
|
mutex_unlock(&tl->mutex);
|
|
|
|
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
intel_context_unpin(ce);
|
2019-04-25 05:01:43 +00:00
|
|
|
}
|
2016-01-15 15:12:50 +00:00
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
static unsigned int
|
2020-08-19 14:08:51 +00:00
|
|
|
eb_select_legacy_ring(struct i915_execbuffer *eb)
|
2016-01-15 15:12:50 +00:00
|
|
|
{
|
2019-04-25 05:01:43 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-08-19 14:08:51 +00:00
|
|
|
struct drm_i915_gem_execbuffer2 *args = eb->args;
|
2016-01-15 15:12:50 +00:00
|
|
|
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
if (user_ring_id != I915_EXEC_BSD &&
|
|
|
|
(args->flags & I915_EXEC_BSD_MASK)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"execbuf with non bsd ring but with invalid "
|
|
|
|
"bsd dispatch flags: %d\n", (int)(args->flags));
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 12:31:53 +00:00
|
|
|
if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
|
2016-01-15 15:12:50 +00:00
|
|
|
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
|
|
|
|
|
|
|
|
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
|
2020-08-19 14:08:51 +00:00
|
|
|
bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
|
2016-01-15 15:12:50 +00:00
|
|
|
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
|
|
|
|
bsd_idx <= I915_EXEC_BSD_RING2) {
|
2016-01-27 13:41:09 +00:00
|
|
|
bsd_idx >>= I915_EXEC_BSD_SHIFT;
|
2016-01-15 15:12:50 +00:00
|
|
|
bsd_idx--;
|
|
|
|
} else {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"execbuf with unknown bsd ring: %u\n",
|
|
|
|
bsd_idx);
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
return _VCS(bsd_idx);
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
|
|
|
|
user_ring_id);
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
return user_ring_map[user_ring_id];
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2020-08-19 14:08:52 +00:00
|
|
|
eb_select_engine(struct i915_execbuffer *eb)
|
2019-04-26 16:33:34 +00:00
|
|
|
{
|
|
|
|
struct intel_context *ce;
|
|
|
|
unsigned int idx;
|
|
|
|
int err;
|
|
|
|
|
drm/i915: Allow a context to define its set of engines
Over the last few years, we have debated how to extend the user API to
support an increase in the number of engines, that may be sparse and
even be heterogeneous within a class (not all video decoders created
equal). We settled on using (class, instance) tuples to identify a
specific engine, with an API for the user to construct a map of engines
to capabilities. Into this picture, we then add a challenge of virtual
engines; one user engine that maps behind the scenes to any number of
physical engines. To keep it general, we want the user to have full
control over that mapping. To that end, we allow the user to constrain a
context to define the set of engines that it can access, order fully
controlled by the user via (class, instance). With such precise control
in context setup, we can continue to use the existing execbuf uABI of
specifying a single index; only now it doesn't automagically map onto
the engines, it uses the user defined engine map from the context.
v2: Fixup freeing of local on success of get_engines()
v3: Allow empty engines[]
v4: s/nengine/num_engines/
v5: Replace 64 limit on num_engines with a note that execbuf is
currently limited to only using the first 64 engines.
v6: Actually use the engines_mutex to guard the ctx->engines.
Testcase: igt/gem_ctx_engines
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-2-chris@chris-wilson.co.uk
2019-05-21 21:11:26 +00:00
|
|
|
if (i915_gem_context_user_engines(eb->gem_context))
|
2020-08-19 14:08:51 +00:00
|
|
|
idx = eb->args->flags & I915_EXEC_RING_MASK;
|
drm/i915: Allow a context to define its set of engines
Over the last few years, we have debated how to extend the user API to
support an increase in the number of engines, that may be sparse and
even be heterogeneous within a class (not all video decoders created
equal). We settled on using (class, instance) tuples to identify a
specific engine, with an API for the user to construct a map of engines
to capabilities. Into this picture, we then add a challenge of virtual
engines; one user engine that maps behind the scenes to any number of
physical engines. To keep it general, we want the user to have full
control over that mapping. To that end, we allow the user to constrain a
context to define the set of engines that it can access, order fully
controlled by the user via (class, instance). With such precise control
in context setup, we can continue to use the existing execbuf uABI of
specifying a single index; only now it doesn't automagically map onto
the engines, it uses the user defined engine map from the context.
v2: Fixup freeing of local on success of get_engines()
v3: Allow empty engines[]
v4: s/nengine/num_engines/
v5: Replace 64 limit on num_engines with a note that execbuf is
currently limited to only using the first 64 engines.
v6: Actually use the engines_mutex to guard the ctx->engines.
Testcase: igt/gem_ctx_engines
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-2-chris@chris-wilson.co.uk
2019-05-21 21:11:26 +00:00
|
|
|
else
|
2020-08-19 14:08:51 +00:00
|
|
|
idx = eb_select_legacy_ring(eb);
|
2019-04-26 16:33:34 +00:00
|
|
|
|
|
|
|
ce = i915_gem_context_get_engine(eb->gem_context, idx);
|
|
|
|
if (IS_ERR(ce))
|
|
|
|
return PTR_ERR(ce);
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
intel_gt_pm_get(ce->engine->gt);
|
2019-04-26 16:33:34 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
|
|
|
|
err = intel_context_alloc_state(ce);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
|
|
|
|
* EIO if the GPU is already wedged.
|
|
|
|
*/
|
|
|
|
err = intel_gt_terminally_wedged(ce->engine->gt);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
eb->context = ce;
|
|
|
|
eb->engine = ce->engine;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure engine pool stays alive even if we call intel_context_put
|
|
|
|
* during ww handling. The pool is destroyed when last pm reference
|
|
|
|
* is dropped, which breaks our -EDEADLK handling.
|
|
|
|
*/
|
|
|
|
return err;
|
|
|
|
|
|
|
|
err:
|
|
|
|
intel_gt_pm_put(ce->engine->gt);
|
|
|
|
intel_context_put(ce);
|
2019-04-26 16:33:34 +00:00
|
|
|
return err;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
static void
|
|
|
|
eb_put_engine(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
intel_gt_pm_put(eb->engine->gt);
|
|
|
|
intel_context_put(eb->context);
|
|
|
|
}
|
|
|
|
|
2017-08-15 14:57:33 +00:00
|
|
|
static void
|
2020-08-04 08:59:54 +00:00
|
|
|
__free_fence_array(struct eb_fence *fences, unsigned int n)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
2020-08-04 08:59:54 +00:00
|
|
|
while (n--) {
|
2020-08-04 08:59:53 +00:00
|
|
|
drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
|
2020-08-04 08:59:54 +00:00
|
|
|
dma_fence_put(fences[n].dma_fence);
|
|
|
|
kfree(fences[n].chain_fence);
|
|
|
|
}
|
2017-08-15 14:57:33 +00:00
|
|
|
kvfree(fences);
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
static int
|
2020-08-04 08:59:54 +00:00
|
|
|
add_timeline_fence_array(struct i915_execbuffer *eb,
|
|
|
|
const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
2020-08-04 08:59:54 +00:00
|
|
|
struct drm_i915_gem_exec_fence __user *user_fences;
|
|
|
|
u64 __user *user_values;
|
|
|
|
struct eb_fence *f;
|
|
|
|
u64 nfences;
|
|
|
|
int err = 0;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
nfences = timeline_fences->fence_count;
|
|
|
|
if (!nfences)
|
2020-08-04 08:59:53 +00:00
|
|
|
return 0;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2017-11-16 10:50:59 +00:00
|
|
|
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
|
|
|
|
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
|
|
|
|
if (nfences > min_t(unsigned long,
|
2020-08-04 08:59:54 +00:00
|
|
|
ULONG_MAX / sizeof(*user_fences),
|
|
|
|
SIZE_MAX / sizeof(*f)) - eb->num_fences)
|
2020-08-04 08:59:53 +00:00
|
|
|
return -EINVAL;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
|
|
|
|
if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
user_values = u64_to_user_ptr(timeline_fences->values_ptr);
|
|
|
|
if (!access_ok(user_values, nfences * sizeof(*user_values)))
|
2020-08-04 08:59:53 +00:00
|
|
|
return -EFAULT;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
f = krealloc(eb->fences,
|
|
|
|
(eb->num_fences + nfences) * sizeof(*f),
|
|
|
|
__GFP_NOWARN | GFP_KERNEL);
|
|
|
|
if (!f)
|
2020-08-04 08:59:53 +00:00
|
|
|
return -ENOMEM;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
eb->fences = f;
|
|
|
|
f += eb->num_fences;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
|
|
|
|
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
|
|
|
|
|
|
|
|
while (nfences--) {
|
|
|
|
struct drm_i915_gem_exec_fence user_fence;
|
2017-08-15 14:57:33 +00:00
|
|
|
struct drm_syncobj *syncobj;
|
2020-08-04 08:59:54 +00:00
|
|
|
struct dma_fence *fence = NULL;
|
|
|
|
u64 point;
|
|
|
|
|
|
|
|
if (__copy_from_user(&user_fence,
|
|
|
|
user_fences++,
|
|
|
|
sizeof(user_fence)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (__get_user(point, user_values++))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
syncobj = drm_syncobj_find(eb->file, user_fence.handle);
|
|
|
|
if (!syncobj) {
|
|
|
|
DRM_DEBUG("Invalid syncobj handle provided\n");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
fence = drm_syncobj_fence_get(syncobj);
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (!fence && user_fence.flags &&
|
|
|
|
!(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
|
|
|
DRM_DEBUG("Syncobj handle has no fence\n");
|
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
return -EINVAL;
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (fence)
|
|
|
|
err = dma_fence_chain_find_seqno(&fence, point);
|
|
|
|
|
|
|
|
if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
|
|
|
DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
|
2020-08-06 16:10:56 +00:00
|
|
|
dma_fence_put(fence);
|
2020-08-04 08:59:54 +00:00
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A point might have been signaled already and
|
|
|
|
* garbage collected from the timeline. In this case
|
|
|
|
* just ignore the point and carry on.
|
|
|
|
*/
|
|
|
|
if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
|
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For timeline syncobjs we need to preallocate chains for
|
|
|
|
* later signaling.
|
|
|
|
*/
|
|
|
|
if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
|
|
|
|
/*
|
|
|
|
* Waiting and signaling the same point (when point !=
|
|
|
|
* 0) would break the timeline.
|
|
|
|
*/
|
|
|
|
if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
|
|
|
|
DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
|
|
|
|
dma_fence_put(fence);
|
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
f->chain_fence =
|
|
|
|
kmalloc(sizeof(*f->chain_fence),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!f->chain_fence) {
|
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
dma_fence_put(fence);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
f->chain_fence = NULL;
|
2017-10-31 10:23:25 +00:00
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
|
|
|
|
f->dma_fence = fence;
|
|
|
|
f->value = point;
|
|
|
|
f++;
|
|
|
|
eb->num_fences++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int add_fence_array(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_execbuffer2 *args = eb->args;
|
|
|
|
struct drm_i915_gem_exec_fence __user *user;
|
|
|
|
unsigned long num_fences = args->num_cliprects;
|
|
|
|
struct eb_fence *f;
|
|
|
|
|
|
|
|
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!num_fences)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
|
|
|
|
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
|
|
|
|
if (num_fences > min_t(unsigned long,
|
|
|
|
ULONG_MAX / sizeof(*user),
|
|
|
|
SIZE_MAX / sizeof(*f) - eb->num_fences))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
user = u64_to_user_ptr(args->cliprects_ptr);
|
|
|
|
if (!access_ok(user, num_fences * sizeof(*user)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
f = krealloc(eb->fences,
|
|
|
|
(eb->num_fences + num_fences) * sizeof(*f),
|
|
|
|
__GFP_NOWARN | GFP_KERNEL);
|
|
|
|
if (!f)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
eb->fences = f;
|
|
|
|
f += eb->num_fences;
|
|
|
|
while (num_fences--) {
|
|
|
|
struct drm_i915_gem_exec_fence user_fence;
|
|
|
|
struct drm_syncobj *syncobj;
|
|
|
|
struct dma_fence *fence = NULL;
|
|
|
|
|
|
|
|
if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
syncobj = drm_syncobj_find(eb->file, user_fence.handle);
|
2017-08-15 14:57:33 +00:00
|
|
|
if (!syncobj) {
|
|
|
|
DRM_DEBUG("Invalid syncobj handle provided\n");
|
2020-08-04 08:59:54 +00:00
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
|
|
|
|
fence = drm_syncobj_fence_get(syncobj);
|
|
|
|
if (!fence) {
|
|
|
|
DRM_DEBUG("Syncobj handle has no fence\n");
|
|
|
|
drm_syncobj_put(syncobj);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
|
2017-10-31 10:23:25 +00:00
|
|
|
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
|
|
|
|
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
|
|
|
|
f->dma_fence = fence;
|
|
|
|
f->value = 0;
|
|
|
|
f->chain_fence = NULL;
|
|
|
|
f++;
|
|
|
|
eb->num_fences++;
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
return 0;
|
2020-08-04 08:59:54 +00:00
|
|
|
}
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
static void put_fence_array(struct eb_fence *fences, int num_fences)
|
|
|
|
{
|
|
|
|
if (fences)
|
|
|
|
__free_fence_array(fences, num_fences);
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2020-08-04 08:59:53 +00:00
|
|
|
await_fence_array(struct i915_execbuffer *eb)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
|
|
|
unsigned int n;
|
|
|
|
int err;
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
for (n = 0; n < eb->num_fences; n++) {
|
2017-08-15 14:57:33 +00:00
|
|
|
struct drm_syncobj *syncobj;
|
|
|
|
unsigned int flags;
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (!eb->fences[n].dma_fence)
|
|
|
|
continue;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
err = i915_request_await_dma_fence(eb->request,
|
|
|
|
eb->fences[n].dma_fence);
|
2017-08-15 14:57:33 +00:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
static void signal_fence_array(const struct i915_execbuffer *eb)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
|
|
|
struct dma_fence * const fence = &eb->request->fence;
|
|
|
|
unsigned int n;
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
for (n = 0; n < eb->num_fences; n++) {
|
2017-08-15 14:57:33 +00:00
|
|
|
struct drm_syncobj *syncobj;
|
|
|
|
unsigned int flags;
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
|
2017-08-15 14:57:33 +00:00
|
|
|
if (!(flags & I915_EXEC_FENCE_SIGNAL))
|
|
|
|
continue;
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (eb->fences[n].chain_fence) {
|
|
|
|
drm_syncobj_add_point(syncobj,
|
|
|
|
eb->fences[n].chain_fence,
|
|
|
|
fence,
|
|
|
|
eb->fences[n].value);
|
|
|
|
/*
|
|
|
|
* The chain's ownership is transferred to the
|
|
|
|
* timeline.
|
|
|
|
*/
|
|
|
|
eb->fences[n].chain_fence = NULL;
|
|
|
|
} else {
|
|
|
|
drm_syncobj_replace_fence(syncobj, fence);
|
|
|
|
}
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
static int
|
|
|
|
parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
|
|
|
|
{
|
|
|
|
struct i915_execbuffer *eb = data;
|
|
|
|
struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
|
|
|
|
|
|
|
|
if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return add_timeline_fence_array(eb, &timeline_fences);
|
|
|
|
}
|
|
|
|
|
2020-03-03 08:05:46 +00:00
|
|
|
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
|
|
|
|
{
|
|
|
|
struct i915_request *rq, *rn;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(rq, rn, &tl->requests, link)
|
|
|
|
if (rq == end || !i915_request_retire(rq))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-12-03 10:34:32 +00:00
|
|
|
static int eb_request_add(struct i915_execbuffer *eb, int err)
|
2020-03-03 08:05:46 +00:00
|
|
|
{
|
|
|
|
struct i915_request *rq = eb->request;
|
|
|
|
struct intel_timeline * const tl = i915_request_timeline(rq);
|
|
|
|
struct i915_sched_attr attr = {};
|
|
|
|
struct i915_request *prev;
|
|
|
|
|
|
|
|
lockdep_assert_held(&tl->mutex);
|
|
|
|
lockdep_unpin_lock(&tl->mutex, rq->cookie);
|
|
|
|
|
|
|
|
trace_i915_request_add(rq);
|
|
|
|
|
|
|
|
prev = __i915_request_commit(rq);
|
|
|
|
|
|
|
|
/* Check that the context wasn't destroyed before submission */
|
2020-03-19 17:07:06 +00:00
|
|
|
if (likely(!intel_context_is_closed(eb->context))) {
|
2020-03-03 08:05:46 +00:00
|
|
|
attr = eb->gem_context->sched;
|
|
|
|
} else {
|
|
|
|
/* Serialise with context_close via the add_to_timeline */
|
2020-03-04 12:18:48 +00:00
|
|
|
i915_request_set_error_once(rq, -ENOENT);
|
|
|
|
__i915_request_skip(rq);
|
2020-12-03 10:34:32 +00:00
|
|
|
err = -ENOENT; /* override any transient errors */
|
2020-03-03 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
__i915_request_queue(rq, &attr);
|
|
|
|
|
|
|
|
/* Try to clean up the client's timeline after submitting the request */
|
|
|
|
if (prev)
|
|
|
|
retire_requests(tl, prev);
|
|
|
|
|
|
|
|
mutex_unlock(&tl->mutex);
|
2020-12-03 10:34:32 +00:00
|
|
|
|
|
|
|
return err;
|
2020-03-03 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
static const i915_user_extension_fn execbuf_extensions[] = {
|
2020-08-04 08:59:54 +00:00
|
|
|
[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
|
2020-08-04 08:59:53 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
|
|
|
|
struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot
|
|
|
|
* have another flag also using it at the same time.
|
|
|
|
*/
|
|
|
|
if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (args->num_cliprects != 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
|
|
|
|
execbuf_extensions,
|
|
|
|
ARRAY_SIZE(execbuf_extensions),
|
|
|
|
eb);
|
|
|
|
}
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
static int
|
2017-06-15 08:14:33 +00:00
|
|
|
i915_gem_do_execbuffer(struct drm_device *dev,
|
2010-11-25 18:00:26 +00:00
|
|
|
struct drm_file *file,
|
|
|
|
struct drm_i915_gem_execbuffer2 *args,
|
2020-08-04 08:59:53 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *exec)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2018-06-08 15:53:46 +00:00
|
|
|
struct drm_i915_private *i915 = to_i915(dev);
|
2017-06-15 08:14:33 +00:00
|
|
|
struct i915_execbuffer eb;
|
2017-01-27 09:40:08 +00:00
|
|
|
struct dma_fence *in_fence = NULL;
|
|
|
|
struct sync_file *out_fence = NULL;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct i915_vma *batch;
|
2017-01-27 09:40:08 +00:00
|
|
|
int out_fence_fd = -1;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2010-11-25 19:32:06 +00:00
|
|
|
|
2017-09-21 11:01:35 +00:00
|
|
|
BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
|
|
|
|
~__EXEC_OBJECT_UNKNOWN_FLAGS);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2018-06-08 15:53:46 +00:00
|
|
|
eb.i915 = i915;
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.file = file;
|
|
|
|
eb.args = args;
|
2020-09-08 05:41:17 +00:00
|
|
|
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
args->flags |= __EXEC_HAS_RELOC;
|
2017-08-16 08:52:06 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.exec = exec;
|
2020-08-19 14:08:44 +00:00
|
|
|
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
|
|
|
|
eb.vma[0].vma = NULL;
|
2020-08-19 14:08:48 +00:00
|
|
|
eb.reloc_pool = eb.batch_pool = NULL;
|
2020-08-19 14:08:52 +00:00
|
|
|
eb.reloc_context = NULL;
|
2017-08-16 08:52:06 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
|
2017-06-15 08:14:33 +00:00
|
|
|
reloc_cache_init(&eb.reloc_cache, eb.i915);
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.buffer_count = args->buffer_count;
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.batch_start_offset = args->batch_start_offset;
|
|
|
|
eb.batch_len = args->batch_len;
|
2019-12-11 23:08:56 +00:00
|
|
|
eb.trampoline = NULL;
|
2017-06-15 08:14:33 +00:00
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
eb.fences = NULL;
|
2020-08-04 08:59:54 +00:00
|
|
|
eb.num_fences = 0;
|
2020-08-04 08:59:53 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags = 0;
|
2012-10-17 11:09:54 +00:00
|
|
|
if (args->flags & I915_EXEC_SECURE) {
|
2021-06-05 15:53:54 +00:00
|
|
|
if (GRAPHICS_VER(i915) >= 11)
|
2018-06-08 15:53:46 +00:00
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Return -EPERM to trigger fallback code on old binaries. */
|
|
|
|
if (!HAS_SECURE_BATCHES(i915))
|
|
|
|
return -EPERM;
|
|
|
|
|
2016-06-21 08:54:20 +00:00
|
|
|
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
|
2018-06-08 15:53:46 +00:00
|
|
|
return -EPERM;
|
2012-10-17 11:09:54 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags |= I915_DISPATCH_SECURE;
|
2012-10-17 11:09:54 +00:00
|
|
|
}
|
2012-12-17 15:21:27 +00:00
|
|
|
if (args->flags & I915_EXEC_IS_PINNED)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags |= I915_DISPATCH_PINNED;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
err = parse_execbuf2_extensions(args, &eb);
|
|
|
|
if (err)
|
|
|
|
goto err_ext;
|
|
|
|
|
|
|
|
err = add_fence_array(&eb);
|
|
|
|
if (err)
|
|
|
|
goto err_ext;
|
|
|
|
|
2020-05-13 18:09:37 +00:00
|
|
|
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
|
|
|
|
if (args->flags & IN_FENCES) {
|
|
|
|
if ((args->flags & IN_FENCES) == IN_FENCES)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
|
2020-08-04 08:59:54 +00:00
|
|
|
if (!in_fence) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err_ext;
|
|
|
|
}
|
2017-01-27 09:40:08 +00:00
|
|
|
}
|
2020-05-13 18:09:37 +00:00
|
|
|
#undef IN_FENCES
|
2019-05-21 21:11:34 +00:00
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (args->flags & I915_EXEC_FENCE_OUT) {
|
|
|
|
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
|
|
|
|
if (out_fence_fd < 0) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = out_fence_fd;
|
2020-05-13 18:09:37 +00:00
|
|
|
goto err_in_fence;
|
2017-01-27 09:40:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
err = eb_create(&eb);
|
|
|
|
if (err)
|
2020-08-04 08:59:54 +00:00
|
|
|
goto err_out_fence;
|
2020-08-04 08:59:53 +00:00
|
|
|
|
2017-06-29 15:04:25 +00:00
|
|
|
GEM_BUG_ON(!eb.lut_size);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2017-06-20 11:05:47 +00:00
|
|
|
err = eb_select_context(&eb);
|
|
|
|
if (unlikely(err))
|
|
|
|
goto err_destroy;
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
err = eb_select_engine(&eb);
|
2019-02-07 07:18:22 +00:00
|
|
|
if (unlikely(err))
|
2019-08-15 20:57:09 +00:00
|
|
|
goto err_context;
|
2019-02-07 07:18:22 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = eb_lookup_vmas(&eb);
|
|
|
|
if (err) {
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
eb_release_vmas(&eb, true, true);
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err_engine;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_gem_ww_ctx_init(&eb.ww, true);
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
err = eb_relocate_parse(&eb);
|
2017-07-21 14:50:36 +00:00
|
|
|
if (err) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* If the user expects the execobject.offset and
|
|
|
|
* reloc.presumed_offset to be an exact match,
|
|
|
|
* as for using NO_RELOC, then we cannot update
|
|
|
|
* the execobject.offset until we have completed
|
|
|
|
* relocation.
|
|
|
|
*/
|
|
|
|
args->flags &= ~__EXEC_HAS_RELOC;
|
|
|
|
goto err_vma;
|
2017-07-21 14:50:36 +00:00
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
ww_acquire_done(&eb.ww.ctx);
|
2020-03-03 20:43:44 +00:00
|
|
|
|
|
|
|
batch = eb.batch->vma;
|
2012-10-17 11:09:54 +00:00
|
|
|
|
2017-06-16 14:05:24 +00:00
|
|
|
/* All GPU relocation batches must be submitted prior to the user rq */
|
|
|
|
GEM_BUG_ON(eb.reloc_cache.rq);
|
|
|
|
|
2015-05-29 16:43:25 +00:00
|
|
|
/* Allocate a request for this batch buffer nice and early. */
|
2019-04-25 05:01:43 +00:00
|
|
|
eb.request = i915_request_create(eb.context);
|
2017-06-15 08:14:33 +00:00
|
|
|
if (IS_ERR(eb.request)) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = PTR_ERR(eb.request);
|
2020-08-19 14:08:54 +00:00
|
|
|
goto err_vma;
|
drm/i915: simplify allocation of driver-internal requests
There are a number of places where the driver needs a request, but isn't
working on behalf of any specific user or in a specific context. At
present, we associate them with the per-engine default context. A future
patch will abolish those per-engine context pointers; but we can already
eliminate a lot of the references to them, just by making the allocator
allow NULL as a shorthand for "an appropriate context for this ring",
which will mean that the callers don't need to know anything about how
the "appropriate context" is found (e.g. per-ring vs per-device, etc).
So this patch renames the existing i915_gem_request_alloc(), and makes
it local (static inline), and replaces it with a wrapper that provides
a default if the context is NULL, and also has a nicer calling
convention (doesn't require a pointer to an output parameter). Then we
change all callers to use the new convention:
OLD:
err = i915_gem_request_alloc(ring, user_ctx, &req);
if (err) ...
NEW:
req = i915_gem_request_alloc(ring, user_ctx);
if (IS_ERR(req)) ...
OLD:
err = i915_gem_request_alloc(ring, ring->default_context, &req);
if (err) ...
NEW:
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) ...
v4: Rebased
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-2-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2016-01-19 19:02:53 +00:00
|
|
|
}
|
2015-05-29 16:43:25 +00:00
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (in_fence) {
|
2020-05-13 18:09:37 +00:00
|
|
|
if (args->flags & I915_EXEC_FENCE_SUBMIT)
|
|
|
|
err = i915_request_await_execution(eb.request,
|
|
|
|
in_fence,
|
|
|
|
eb.engine->bond_execute);
|
|
|
|
else
|
|
|
|
err = i915_request_await_dma_fence(eb.request,
|
|
|
|
in_fence);
|
2019-05-21 21:11:34 +00:00
|
|
|
if (err < 0)
|
|
|
|
goto err_request;
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (eb.fences) {
|
2020-08-04 08:59:53 +00:00
|
|
|
err = await_fence_array(&eb);
|
2017-08-15 14:57:33 +00:00
|
|
|
if (err)
|
|
|
|
goto err_request;
|
|
|
|
}
|
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (out_fence_fd != -1) {
|
2017-06-15 08:14:33 +00:00
|
|
|
out_fence = sync_file_create(&eb.request->fence);
|
2017-01-27 09:40:08 +00:00
|
|
|
if (!out_fence) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = -ENOMEM;
|
2017-01-27 09:40:08 +00:00
|
|
|
goto err_request;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* Whilst this request exists, batch_obj will be on the
|
2016-08-10 12:41:46 +00:00
|
|
|
* active_list, and so will hold the active reference. Only when this
|
|
|
|
* request is retired will the the batch_obj be moved onto the
|
|
|
|
* inactive_list and lose its active reference. Hence we do not need
|
|
|
|
* to explicitly hold another reference here.
|
|
|
|
*/
|
2020-03-03 20:43:44 +00:00
|
|
|
eb.request->batch = batch;
|
2020-08-19 14:08:48 +00:00
|
|
|
if (eb.batch_pool)
|
|
|
|
intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request);
|
2015-05-29 16:43:27 +00:00
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
trace_i915_request_queue(eb.request, eb.batch_flags);
|
2020-03-03 20:43:44 +00:00
|
|
|
err = eb_submit(&eb, batch);
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
|
drm/i915: Late request cancellations are harmful
Conceptually, each request is a record of a hardware transaction - we
build up a list of pending commands and then either commit them to
hardware, or cancel them. However, whilst building up the list of
pending commands, we may modify state outside of the request and make
references to the pending request. If we do so and then cancel that
request, external objects then point to the deleted request leading to
both graphical and memory corruption.
The easiest example is to consider object/VMA tracking. When we mark an
object as active in a request, we store a pointer to this, the most
recent request, in the object. Then we want to free that object, we wait
for the most recent request to be idle before proceeding (otherwise the
hardware will write to pages now owned by the system, or we will attempt
to read from those pages before the hardware is finished writing). If
the request was cancelled instead, that wait completes immediately. As a
result, all requests must be committed and not cancelled if the external
state is unknown.
All that remains of i915_gem_request_cancel() users are just a couple of
extremely unlikely allocation failures, so remove the API entirely.
A consequence of committing all incomplete requests is that we generate
excess breadcrumbs and fill the ring much more often with dummy work. We
have completely undone the outstanding_last_seqno optimisation.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93907
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-16-git-send-email-chris@chris-wilson.co.uk
2016-04-13 16:35:15 +00:00
|
|
|
err_request:
|
2019-12-17 13:47:29 +00:00
|
|
|
i915_request_get(eb.request);
|
2020-12-03 10:34:32 +00:00
|
|
|
err = eb_request_add(&eb, err);
|
2017-03-02 12:25:25 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (eb.fences)
|
2020-08-04 08:59:53 +00:00
|
|
|
signal_fence_array(&eb);
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (out_fence) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err == 0) {
|
2017-01-27 09:40:08 +00:00
|
|
|
fd_install(out_fence_fd, out_fence->file);
|
2018-02-14 19:18:25 +00:00
|
|
|
args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
|
2017-01-27 09:40:08 +00:00
|
|
|
args->rsvd2 |= (u64)out_fence_fd << 32;
|
|
|
|
out_fence_fd = -1;
|
|
|
|
} else {
|
|
|
|
fput(out_fence->file);
|
|
|
|
}
|
|
|
|
}
|
2019-12-17 13:47:29 +00:00
|
|
|
i915_request_put(eb.request);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err_vma:
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
eb_release_vmas(&eb, true, true);
|
2019-12-11 23:08:56 +00:00
|
|
|
if (eb.trampoline)
|
|
|
|
i915_vma_unpin(eb.trampoline);
|
2020-08-19 14:08:48 +00:00
|
|
|
WARN_ON(err == -EDEADLK);
|
|
|
|
i915_gem_ww_ctx_fini(&eb.ww);
|
|
|
|
|
|
|
|
if (eb.batch_pool)
|
|
|
|
intel_gt_buffer_pool_put(eb.batch_pool);
|
|
|
|
if (eb.reloc_pool)
|
|
|
|
intel_gt_buffer_pool_put(eb.reloc_pool);
|
2020-08-19 14:08:52 +00:00
|
|
|
if (eb.reloc_context)
|
|
|
|
intel_context_put(eb.reloc_context);
|
2020-08-19 14:08:48 +00:00
|
|
|
err_engine:
|
2020-08-19 14:08:52 +00:00
|
|
|
eb_put_engine(&eb);
|
2019-08-04 12:48:25 +00:00
|
|
|
err_context:
|
2019-04-25 05:01:43 +00:00
|
|
|
i915_gem_context_put(eb.gem_context);
|
2017-06-20 11:05:47 +00:00
|
|
|
err_destroy:
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb_destroy(&eb);
|
2017-06-29 15:04:25 +00:00
|
|
|
err_out_fence:
|
2017-01-27 09:40:08 +00:00
|
|
|
if (out_fence_fd != -1)
|
|
|
|
put_unused_fd(out_fence_fd);
|
2017-02-03 22:45:29 +00:00
|
|
|
err_in_fence:
|
2017-01-27 09:40:08 +00:00
|
|
|
dma_fence_put(in_fence);
|
2020-08-04 08:59:54 +00:00
|
|
|
err_ext:
|
|
|
|
put_fence_array(eb.fences, eb.num_fences);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2017-11-16 10:50:59 +00:00
|
|
|
static size_t eb_element_size(void)
|
|
|
|
{
|
2020-08-19 14:08:44 +00:00
|
|
|
return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
|
2017-11-16 10:50:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool check_buffer_count(size_t count)
|
|
|
|
{
|
|
|
|
const size_t sz = eb_element_size();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
|
|
|
|
* array size (see eb_create()). Otherwise, we can accept an array as
|
|
|
|
* large as can be addressed (though use large arrays at your peril)!
|
|
|
|
*/
|
|
|
|
|
|
|
|
return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
|
|
|
|
}
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
int
|
2018-02-07 16:48:41 +00:00
|
|
|
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2020-01-22 12:57:50 +00:00
|
|
|
struct drm_i915_private *i915 = to_i915(dev);
|
2010-11-25 18:00:26 +00:00
|
|
|
struct drm_i915_gem_execbuffer2 *args = data;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *exec2_list;
|
2017-11-16 10:50:59 +00:00
|
|
|
const size_t count = args->buffer_count;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2017-11-16 10:50:59 +00:00
|
|
|
if (!check_buffer_count(count)) {
|
2020-01-22 12:57:50 +00:00
|
|
|
drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
|
2010-11-25 18:00:26 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-12-09 12:23:14 +00:00
|
|
|
err = i915_gem_check_execbuffer(args);
|
|
|
|
if (err)
|
|
|
|
return err;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
/* Allocate extra slots for use by the command parser */
|
|
|
|
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
|
2017-09-13 23:28:29 +00:00
|
|
|
__GFP_NOWARN | GFP_KERNEL);
|
2010-11-25 18:00:26 +00:00
|
|
|
if (exec2_list == NULL) {
|
2020-01-22 12:57:50 +00:00
|
|
|
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
|
|
|
|
count);
|
2010-11-25 18:00:26 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (copy_from_user(exec2_list,
|
|
|
|
u64_to_user_ptr(args->buffers_ptr),
|
2017-11-16 10:50:59 +00:00
|
|
|
sizeof(*exec2_list) * count)) {
|
2020-01-22 12:57:50 +00:00
|
|
|
drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
|
2017-05-17 12:23:12 +00:00
|
|
|
kvfree(exec2_list);
|
2010-11-25 18:00:26 +00:00
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now that we have begun execution of the batchbuffer, we ignore
|
|
|
|
* any new error after this point. Also given that we have already
|
|
|
|
* updated the associated relocations, we try to write out the current
|
|
|
|
* object locations irrespective of any error.
|
|
|
|
*/
|
|
|
|
if (args->flags & __EXEC_HAS_RELOC) {
|
2014-06-13 13:42:51 +00:00
|
|
|
struct drm_i915_gem_exec_object2 __user *user_exec_list =
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
u64_to_user_ptr(args->buffers_ptr);
|
|
|
|
unsigned int i;
|
2014-05-23 09:45:52 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/* Copy the new buffer offsets back to the user's exec list. */
|
2019-01-04 20:56:09 +00:00
|
|
|
/*
|
|
|
|
* Note: count * sizeof(*user_exec_list) does not overflow,
|
|
|
|
* because we checked 'count' in check_buffer_count().
|
|
|
|
*
|
|
|
|
* And this range already got effectively checked earlier
|
|
|
|
* when we did the "copy_from_user()" above.
|
|
|
|
*/
|
2020-04-03 07:20:52 +00:00
|
|
|
if (!user_write_access_begin(user_exec_list,
|
|
|
|
count * sizeof(*user_exec_list)))
|
2019-02-28 12:52:31 +00:00
|
|
|
goto end;
|
2019-01-04 20:56:09 +00:00
|
|
|
|
2014-05-23 09:45:52 +00:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (!(exec2_list[i].offset & UPDATE))
|
|
|
|
continue;
|
|
|
|
|
2015-12-29 17:24:52 +00:00
|
|
|
exec2_list[i].offset =
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
|
|
|
|
unsafe_put_user(exec2_list[i].offset,
|
|
|
|
&user_exec_list[i].offset,
|
|
|
|
end_user);
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
end_user:
|
2020-04-03 07:20:52 +00:00
|
|
|
user_write_access_end();
|
2019-02-28 12:52:31 +00:00
|
|
|
end:;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
|
2017-05-17 12:23:12 +00:00
|
|
|
kvfree(exec2_list);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2020-05-04 14:06:29 +00:00
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
|
|
#include "selftests/i915_gem_execbuffer.c"
|
|
|
|
#endif
|