/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

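/*
 * Registers that are write-only on the listed platform; their contents
 * cannot be verified by reading them back.
 */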
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

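/*
 * Reference copies of the workaround lists, used as the expected state
 * when verifying the hardware after a reset.
 */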
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

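/*
 * Submit the request and wait briefly for it to complete, flagging -EIO
 * if it does not finish in time.
 */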
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

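/*
 * Submit the request and, if a spinner was supplied, wait for the spinner
 * batch to start executing before returning.
 */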
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

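/*
 * Build reference copies of the GT, engine and context workaround lists so
 * that the hardware state can be compared against them after a reset.
 */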
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

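/*
 * Emit MI_STORE_REGISTER_MEM commands that copy each RING_FORCE_TO_NONPRIV
 * slot into a freshly allocated object, returning that object for the
 * caller to inspect.
 */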
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

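/*
 * Compare the RING_FORCE_TO_NONPRIV slots read back from the hardware
 * against the whitelist the driver expects to have programmed.
 */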
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(&ctx->i915->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

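/*
 * Switch the engine to a throwaway kernel context and, if a spinner is
 * provided, keep it busy there before the reset is applied.
 */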
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	kernel_context_close(ctx);
	return err;
}

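/*
 * Verify that the whitelist survives a reset: check it before the reset,
 * after the reset in the same context, and again in a fresh context.
 */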
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

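/* Allocate and pin a small batch buffer object in the context's address space. */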
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	i915_vm_put(vm);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

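/*
 * Model the effect of writing @new to a register whose writable bits are
 * selected by @rsvd; a mask of 0x0000ffff denotes a masked register where
 * the high word of the written value selects which low bits to update.
 */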
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

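/* Number of whitelist entries that grant write access to userspace. */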
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

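/*
 * Write a series of garbage values to each writable whitelisted register
 * from a user batch, reading the register back after every write, and
 * check the results against what the write masking model predicts.
 */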
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_address_space *vm;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	vm = i915_gem_context_get_vm_rcu(ctx);
	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
	i915_vm_put(vm);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(&ctx->i915->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mock_file_free(i915, file);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(&i915->gt);

	if (intel_has_reset_engine(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(&i915->gt);
	return err;
}

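/*
 * Store the current value of every whitelisted register into the supplied
 * results buffer using MI_STORE_REGISTER_MEM from the given context.
 */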
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

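/*
 * From an unprivileged user batch, write 0xffffffff to every writable
 * whitelisted register in the given context.
 */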
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

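/*
 * Compare two snapshots of the whitelisted registers entry by entry using
 * the supplied comparison callback, skipping read-only entries.
 */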
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

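/*
 * Check the current hardware and context state against the reference GT,
 * engine and context workaround lists.
 */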
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

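/* Check that the workarounds still hold after a full GPU reset. */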
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);

	return ok ? 0 : -ESRCH;
}

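/*
 * Check that the workarounds still hold after per-engine resets, both
 * while the engine is idle and while it is busy running a spinner.
 */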
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);
	kernel_context_close(ctx);

	igt_flush_test(i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}