drm/i915: Move debug only per-request pid tracking from request to ctx

Since contexts are not currently shared between userspace processes, we
have an exact correspondence between context creator and guilty batch
submitter. Therefore we can save some per-batch work by inspecting the
context->pid upon error instead. Note that we take the context's
creator's pid rather than the file's pid in order to better track fd
passed over sockets.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1471254551-25805-29-git-send-email-chris@chris-wilson.co.uk
Chris Wilson <chris@chris-wilson.co.uk>  2016-08-15 10:49:08 +01:00
commit c84455b4ba
parent bde13ebdab

6 changed files with 32 additions and 21 deletions
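
The pattern the patch applies is small but easy to get wrong: take a reference on the creator's struct pid once at context creation, resolve it to a task only under rcu_read_lock() when reporting, and drop the reference when the context is freed. Below is a minimal, self-contained sketch of that lifecycle; the demo_* names are illustrative stand-ins, not part of the i915 code.

/*
 * Sketch of the pid-tracking lifecycle this patch adopts (assumed,
 * simplified context object; not the real i915 structures).
 */
#include <linux/pid.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct demo_ctx {
	struct pid *pid;	/* creator's pid, pinned at creation */
};

static struct demo_ctx *demo_ctx_create(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	/* Pin the creating task's pid; it stays valid even if the fd is
	 * later passed to another process over a socket. */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	return ctx;
}

static void demo_ctx_report(const struct demo_ctx *ctx)
{
	struct task_struct *task;

	/* The creator may have exited; only touch ->comm under RCU. */
	rcu_read_lock();
	task = ctx->pid ? pid_task(ctx->pid, PIDTYPE_PID) : NULL;
	pr_info("context owner: %s [%d]\n",
		task ? task->comm : "<unknown>",
		ctx->pid ? pid_nr(ctx->pid) : 0);
	rcu_read_unlock();
}

static void demo_ctx_free(struct demo_ctx *ctx)
{
	put_pid(ctx->pid);	/* put_pid(NULL) is a no-op */
	kfree(ctx);
}

Tracking the creator via ctx->pid rather than the drm file's pid is what lets the error state name the right process when a context's fd has been handed to another process, and it removes the per-request get_pid()/put_pid() that the patch deletes from the request path.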

drivers/gpu/drm/i915/i915_debugfs.c

@@ -460,6 +460,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	print_context_stats(m, dev_priv);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct drm_i915_gem_request *request;
 		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
@@ -473,10 +475,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		 * still alive (e.g. get_pid(current) => fork() => exit()).
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
+		mutex_lock(&dev->struct_mutex);
+		request = list_first_entry_or_null(&file_priv->mm.request_list,
+						   struct drm_i915_gem_request,
+						   client_list);
 		rcu_read_lock();
-		task = pid_task(file->pid, PIDTYPE_PID);
+		task = pid_task(request && request->ctx->pid ?
+				request->ctx->pid : file->pid,
+				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	mutex_unlock(&dev->filelist_mutex);
@@ -658,12 +667,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 
 		seq_printf(m, "%s requests: %d\n", engine->name, count);
 		list_for_each_entry(req, &engine->request_list, link) {
+			struct pid *pid = req->ctx->pid;
 			struct task_struct *task;
 
 			rcu_read_lock();
-			task = NULL;
-			if (req->pid)
-				task = pid_task(req->pid, PIDTYPE_PID);
+			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
 			seq_printf(m, " %x @ %d: %s [%d]\n",
 				   req->fence.seqno,
 				   (int) (jiffies - req->emitted_jiffies),
@@ -1952,18 +1960,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		seq_printf(m, "HW context %u ", ctx->hw_id);
-		if (IS_ERR(ctx->file_priv)) {
-			seq_puts(m, "(deleted) ");
-		} else if (ctx->file_priv) {
-			struct pid *pid = ctx->file_priv->file->pid;
+		if (ctx->pid) {
 			struct task_struct *task;
 
-			task = get_pid_task(pid, PIDTYPE_PID);
+			task = get_pid_task(ctx->pid, PIDTYPE_PID);
 			if (task) {
 				seq_printf(m, "(%s [%d]) ",
 					   task->comm, task->pid);
 				put_task_struct(task);
 			}
+		} else if (IS_ERR(ctx->file_priv)) {
+			seq_puts(m, "(deleted) ");
 		} else {
 			seq_puts(m, "(kernel) ");
 		}

drivers/gpu/drm/i915/i915_drv.h

@@ -782,6 +782,7 @@ struct drm_i915_error_state {
 
 		struct drm_i915_error_request {
 			long jiffies;
+			pid_t pid;
 			u32 seqno;
 			u32 head;
 			u32 tail;
@@ -880,6 +881,7 @@ struct i915_gem_context {
 	struct drm_i915_private *i915;
 	struct drm_i915_file_private *file_priv;
 	struct i915_hw_ppgtt *ppgtt;
+	struct pid *pid;
 
 	struct i915_ctx_hang_stats hang_stats;

drivers/gpu/drm/i915/i915_gem_context.c

@@ -158,6 +158,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		i915_vma_put(ce->state);
 	}
 
+	put_pid(ctx->pid);
 	list_del(&ctx->link);
 
 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -311,6 +312,9 @@ __create_hw_context(struct drm_device *dev,
 		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
+	if (file_priv)
+		ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
 	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there

drivers/gpu/drm/i915/i915_gem_request.c

@@ -137,8 +137,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 	list_add_tail(&req->client_list, &file_priv->mm.request_list);
 	spin_unlock(&file_priv->mm.lock);
 
-	req->pid = get_pid(task_pid(current));
-
 	return 0;
 }
@@ -154,9 +152,6 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 	list_del(&request->client_list);
 	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
-
-	put_pid(request->pid);
-	request->pid = NULL;
 }
 
 void i915_gem_retire_noop(struct i915_gem_active *active,
@@ -407,7 +402,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->previous_context = NULL;
 	req->file_priv = NULL;
 	req->batch = NULL;
-	req->pid = NULL;
 	req->elsp_submitted = 0;
 
 	/*

drivers/gpu/drm/i915/i915_gem_request.h

@@ -134,9 +134,6 @@ struct drm_i915_gem_request {
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 
-	/** process identifier submitting this request */
-	struct pid *pid;
-
 	/**
 	 * The ELSP only accepts two elements at a time, so we queue
 	 * context/tail pairs on a given queue (ring->execlist_queue) until the

drivers/gpu/drm/i915/i915_gpu_error.c

@@ -470,7 +470,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			   dev_priv->engine[i].name,
 			   ee->num_requests);
 		for (j = 0; j < ee->num_requests; j++) {
-			err_printf(m, " seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+			err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+				   ee->requests[j].pid,
 				   ee->requests[j].seqno,
 				   ee->requests[j].jiffies,
 				   ee->requests[j].head,
@@ -1076,6 +1077,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct intel_ring *ring;
+			struct pid *pid;
 
 			ee->vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
@@ -1097,11 +1099,12 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				i915_error_object_create(dev_priv,
 							 request->ctx->engine[i].state);
 
-			if (request->pid) {
+			pid = request->ctx->pid;
+			if (pid) {
 				struct task_struct *task;
 
 				rcu_read_lock();
-				task = pid_task(request->pid, PIDTYPE_PID);
+				task = pid_task(pid, PIDTYPE_PID);
 				if (task) {
 					strcpy(ee->comm, task->comm);
 					ee->pid = task->pid;
@@ -1166,6 +1169,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			erq->jiffies = request->emitted_jiffies;
 			erq->head = request->head;
 			erq->tail = request->tail;
+
+			rcu_read_lock();
+			erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+			rcu_read_unlock();
 		}
 	}
 }