drm/i915/selftests: Add initial GuC selftest for scrubbing lost G2H
While debugging an issue with full GT resets I went down a rabbit hole thinking the scrubbing of lost G2H wasn't working correctly. This proved to be incorrect as this was working just fine but this chase inspired me to write a selftest to prove that this works. This simple selftest injects errors dropping various G2H and then issues a full GT reset proving that the scrubbing of these G2H doesn't blow up. v2: (Daniel Vetter) - Use ifdef instead of macros for selftests v3: (Checkpatch) - A space after 'switch' statement v4: (Daniele) - A comment saying GT won't idle if G2H are lost Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-12-matthew.brost@intel.com
This commit is contained in:
committed by
John Harrison
parent
d135865cb8
commit
d2420c2ed8
@@ -198,6 +198,24 @@ struct intel_context {
|
||||
*/
|
||||
u8 guc_prio;
|
||||
u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
|
||||
|
||||
#ifdef CONFIG_DRM_I915_SELFTEST
|
||||
/**
|
||||
* @drop_schedule_enable: Force drop of schedule enable G2H for selftest
|
||||
*/
|
||||
bool drop_schedule_enable;
|
||||
|
||||
/**
|
||||
* @drop_schedule_disable: Force drop of schedule disable G2H for
|
||||
* selftest
|
||||
*/
|
||||
bool drop_schedule_disable;
|
||||
|
||||
/**
|
||||
* @drop_deregister: Force drop of deregister G2H for selftest
|
||||
*/
|
||||
bool drop_deregister;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* __INTEL_CONTEXT_TYPES__ */
|
||||
|
||||
@@ -2639,6 +2639,13 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
|
||||
|
||||
trace_intel_context_deregister_done(ce);
|
||||
|
||||
#ifdef CONFIG_DRM_I915_SELFTEST
|
||||
if (unlikely(ce->drop_deregister)) {
|
||||
ce->drop_deregister = false;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (context_wait_for_deregister_to_register(ce)) {
|
||||
struct intel_runtime_pm *runtime_pm =
|
||||
&ce->engine->gt->i915->runtime_pm;
|
||||
@@ -2693,10 +2700,24 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
|
||||
trace_intel_context_sched_done(ce);
|
||||
|
||||
if (context_pending_enable(ce)) {
|
||||
#ifdef CONFIG_DRM_I915_SELFTEST
|
||||
if (unlikely(ce->drop_schedule_enable)) {
|
||||
ce->drop_schedule_enable = false;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
clr_context_pending_enable(ce);
|
||||
} else if (context_pending_disable(ce)) {
|
||||
bool banned;
|
||||
|
||||
#ifdef CONFIG_DRM_I915_SELFTEST
|
||||
if (unlikely(ce->drop_schedule_disable)) {
|
||||
ce->drop_schedule_disable = false;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Unpin must be done before __guc_signal_context_fence,
|
||||
* otherwise a race exists between the requests getting
|
||||
@@ -3073,3 +3094,7 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
||||
#include "selftest_guc.c"
|
||||
#endif
|
||||
|
||||
127
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
Normal file
127
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
Normal file
@@ -0,0 +1,127 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
 * Copyright © 2021 Intel Corporation
 */
|
||||
|
||||
#include "selftests/intel_scheduler_helpers.h"
|
||||
|
||||
static struct i915_request *nop_user_request(struct intel_context *ce,
|
||||
struct i915_request *from)
|
||||
{
|
||||
struct i915_request *rq;
|
||||
int ret;
|
||||
|
||||
rq = intel_context_create_request(ce);
|
||||
if (IS_ERR(rq))
|
||||
return rq;
|
||||
|
||||
if (from) {
|
||||
ret = i915_sw_fence_await_dma_fence(&rq->submit,
|
||||
&from->fence, 0,
|
||||
I915_FENCE_GFP);
|
||||
if (ret < 0) {
|
||||
i915_request_put(rq);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
||||
i915_request_get(rq);
|
||||
i915_request_add(rq);
|
||||
|
||||
return rq;
|
||||
}
|
||||
|
||||
static int intel_guc_scrub_ctbs(void *arg)
|
||||
{
|
||||
struct intel_gt *gt = arg;
|
||||
int ret = 0;
|
||||
int i;
|
||||
struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
|
||||
intel_wakeref_t wakeref;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_context *ce;
|
||||
|
||||
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
|
||||
engine = intel_selftest_find_any_engine(gt);
|
||||
|
||||
/* Submit requests and inject errors forcing G2H to be dropped */
|
||||
for (i = 0; i < 3; ++i) {
|
||||
ce = intel_context_create(engine);
|
||||
if (IS_ERR(ce)) {
|
||||
ret = PTR_ERR(ce);
|
||||
pr_err("Failed to create context, %d: %d\n", i, ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
ce->drop_schedule_enable = true;
|
||||
break;
|
||||
case 1:
|
||||
ce->drop_schedule_disable = true;
|
||||
break;
|
||||
case 2:
|
||||
ce->drop_deregister = true;
|
||||
break;
|
||||
}
|
||||
|
||||
rq = nop_user_request(ce, NULL);
|
||||
intel_context_put(ce);
|
||||
|
||||
if (IS_ERR(rq)) {
|
||||
ret = PTR_ERR(rq);
|
||||
pr_err("Failed to create request, %d: %d\n", i, ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
last[i] = rq;
|
||||
}
|
||||
|
||||
for (i = 0; i < 3; ++i) {
|
||||
ret = i915_request_wait(last[i], 0, HZ);
|
||||
if (ret < 0) {
|
||||
pr_err("Last request failed to complete: %d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
i915_request_put(last[i]);
|
||||
last[i] = NULL;
|
||||
}
|
||||
|
||||
/* Force all H2G / G2H to be submitted / processed */
|
||||
intel_gt_retire_requests(gt);
|
||||
msleep(500);
|
||||
|
||||
/* Scrub missing G2H */
|
||||
intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
|
||||
|
||||
/* GT will not idle if G2H are lost */
|
||||
ret = intel_gt_wait_for_idle(gt, HZ);
|
||||
if (ret < 0) {
|
||||
pr_err("GT failed to idle: %d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
err:
|
||||
for (i = 0; i < 3; ++i)
|
||||
if (last[i])
|
||||
i915_request_put(last[i]);
|
||||
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int intel_guc_live_selftests(struct drm_i915_private *i915)
|
||||
{
|
||||
static const struct i915_subtest tests[] = {
|
||||
SUBTEST(intel_guc_scrub_ctbs),
|
||||
};
|
||||
struct intel_gt *gt = &i915->gt;
|
||||
|
||||
if (intel_gt_is_wedged(gt))
|
||||
return 0;
|
||||
|
||||
if (!intel_uc_uses_guc_submission(>->uc))
|
||||
return 0;
|
||||
|
||||
return intel_gt_live_subtests(tests, gt);
|
||||
}
|
||||
@@ -47,5 +47,6 @@ selftest(execlists, intel_execlists_live_selftests)
|
||||
selftest(ring_submission, intel_ring_submission_live_selftests)
|
||||
selftest(perf, i915_perf_live_selftests)
|
||||
selftest(slpc, intel_slpc_live_selftests)
|
||||
selftest(guc, intel_guc_live_selftests)
|
||||
/* Here be dragons: keep last to run last! */
|
||||
selftest(late_gt_pm, intel_gt_pm_late_selftests)
|
||||
|
||||
@@ -14,6 +14,18 @@
|
||||
#define REDUCED_PREEMPT 10
|
||||
#define WAIT_FOR_RESET_TIME 10000
|
||||
|
||||
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
for_each_engine(engine, gt, id)
|
||||
return engine;
|
||||
|
||||
pr_err("No valid engine found!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
|
||||
struct intel_selftest_saved_policy *saved,
|
||||
u32 modify_type)
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
struct i915_request;
|
||||
struct intel_engine_cs;
|
||||
struct intel_gt;
|
||||
|
||||
struct intel_selftest_saved_policy {
|
||||
u32 flags;
|
||||
@@ -23,6 +24,7 @@ enum selftest_scheduler_modify {
|
||||
SELFTEST_SCHEDULER_MODIFY_FAST_RESET,
|
||||
};
|
||||
|
||||
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt);
|
||||
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
|
||||
struct intel_selftest_saved_policy *saved,
|
||||
enum selftest_scheduler_modify modify_type);
|
||||
|
||||
Reference in New Issue
Block a user