drm/i915/guc: Use system workqueue for log capture

We only employ a single task for log capture, and created a workqueue
for the purpose of ensuring we had a high-priority queue for low
latency. We can simply use the system_highpri_wq and avoid the
complication of creating our own amidst the maze of mutexes.
(Currently we create the wq early, before we even know we need it, in
order to avoid trying to create it on demand while we hold the logging
mutex.)
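
In workqueue terms the swap looks like the following minimal sketch;
the my_* names are illustrative placeholders, not code from the patch:

#include <linux/workqueue.h>

static void my_capture_fn(struct work_struct *work)
{
	/* copy out the log buffer and ack the flush notification */
}

static DECLARE_WORK(my_capture_work, my_capture_fn);

static void my_handle_flush_event(void)
{
	/*
	 * Previously: queue_work(my_private_wq, &my_capture_work), with
	 * my_private_wq obtained from alloc_ordered_workqueue() using
	 * WQ_HIGHPRI. The shared high-priority queue gives the same low
	 * latency without the driver owning a workqueue of its own.
	 */
	queue_work(system_highpri_wq, &my_capture_work);
}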

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-1-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-07-13 11:00:06 +01:00
Commit: a30eff49d0 (parent cb823ed991)

3 changed files with 2 additions and 42 deletions

File 1 of 3:

@@ -99,47 +99,9 @@ void intel_guc_init_early(struct intel_guc *guc)
 	}
 }
 
-static int guc_init_wq(struct intel_guc *guc)
-{
-	/*
-	 * GuC log buffer flush work item has to do register access to
-	 * send the ack to GuC and this work item, if not synced before
-	 * suspend, can potentially get executed after the GFX device is
-	 * suspended.
-	 * By marking the WQ as freezable, we don't have to bother about
-	 * flushing of this work item from the suspend hooks, the pending
-	 * work item if any will be either executed before the suspend
-	 * or scheduled later on resume. This way the handling of work
-	 * item can be kept same between system suspend & rpm suspend.
-	 */
-	guc->log.relay.flush_wq =
-		alloc_ordered_workqueue("i915-guc_log",
-					WQ_HIGHPRI | WQ_FREEZABLE);
-	if (!guc->log.relay.flush_wq) {
-		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void guc_fini_wq(struct intel_guc *guc)
-{
-	struct workqueue_struct *wq;
-
-	wq = fetch_and_zero(&guc->log.relay.flush_wq);
-	if (wq)
-		destroy_workqueue(wq);
-}
-
 int intel_guc_init_misc(struct intel_guc *guc)
 {
 	struct drm_i915_private *i915 = guc_to_i915(guc);
-	int ret;
-
-	ret = guc_init_wq(guc);
-	if (ret)
-		return ret;
 
 	intel_uc_fw_fetch(i915, &guc->fw);

@@ -149,7 +111,6 @@ int intel_guc_init_misc(struct intel_guc *guc)
 void intel_guc_fini_misc(struct intel_guc *guc)
 {
 	intel_uc_fw_cleanup_fetch(&guc->fw);
-	guc_fini_wq(guc);
 }
 
 static int guc_shared_data_create(struct intel_guc *guc)
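
The comment removed above is the subtle part of this change: the old
queue was WQ_FREEZABLE precisely so that a pending flush could never
execute after the device had suspended. system_highpri_wq is not
freezable, so a pending work item presumably has to be synced by hand
on the suspend path instead; a hedged sketch of that pattern, with
my_dev and my_suspend as hypothetical names:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct log_flush_work;
};

static int my_suspend(struct my_dev *dev)
{
	/*
	 * Wait for any queued log capture to complete (or use
	 * cancel_work_sync() to discard it) before the device powers
	 * down, since the shared workqueue is not frozen on our behalf.
	 */
	flush_work(&dev->log_flush_work);
	return 0;
}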

File 2 of 3:

@@ -578,7 +578,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 	 * the flush notification. This means that we need to unconditionally
 	 * flush on relay enabling, since GuC only notifies us once.
 	 */
-	queue_work(log->relay.flush_wq, &log->relay.flush_work);
+	queue_work(system_highpri_wq, &log->relay.flush_work);
 
 	return 0;

@@ -628,5 +628,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
 
 void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
 {
-	queue_work(log->relay.flush_wq, &log->relay.flush_work);
+	queue_work(system_highpri_wq, &log->relay.flush_work);
 }

File 3 of 3:

@@ -66,7 +66,6 @@ struct intel_guc_log {
 	struct i915_vma *vma;
 	struct {
 		void *buf_addr;
-		struct workqueue_struct *flush_wq;
 		struct work_struct flush_work;
 		struct rchan *channel;
 		struct mutex lock;