From 0f75551216091223efe1f18295f655aff6415385 Mon Sep 17 00:00:00 2001
From: Weinan Li <weinan.z.li@intel.com>
Date: Tue, 22 Jan 2019 13:46:27 +0800
Subject: [PATCH 1/2] drm/i915/gvt: release shadow batch buffer and wa_ctx
 before destroying a workload

GVT-g shadows the privileged batch buffer and the indirect context
during command scanning. Move the release of these shadow buffers into
intel_vgpu_destroy_workload() so that they are recycled on every path,
not only when the workload completes successfully.

Fixes: 0cce2823ed37 ("drm/i915/gvt/kvmgt:Refine error handling for prepare_execlist_workload")
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
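Note (kept out of the commit message proper): the NULL assignments added
below make the release helper idempotent, so calling it again from
intel_vgpu_destroy_workload() after an earlier error path has already
released the objects is a harmless no-op. For reference, a sketch of the
resulting helper, assuming the early-return NULL check sits in the
context elided above the hunk:

	static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
	{
		/* Nothing was shadowed, or it was already released. */
		if (!wa_ctx->indirect_ctx.obj)
			return;

		i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
		i915_gem_object_put(wa_ctx->indirect_ctx.obj);

		/* Clear the stale pointers so a second call is a no-op. */
		wa_ctx->indirect_ctx.obj = NULL;
		wa_ctx->indirect_ctx.shadow_va = NULL;
	}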
 drivers/gpu/drm/i915/gvt/scheduler.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5567ddc7760f..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 

From 2b24408158276faf0f4a71eb97a2364686d5b06c Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 23 Jan 2019 13:51:55 +0000
Subject: [PATCH 2/2] drm/i915/execlists: Mark up priority boost on preemption

Record the priority boost we give to the preempted client, or else the
priority queue no longer matches the request priority order and we can
end up in an infinite loop of preempting the same pair of requests.

Fixes: e9eaf82d97a2 ("drm/i915: Priority boost for waiting clients")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190123135155.21562-1-chris@chris-wilson.co.uk
(cherry picked from commit 6e062b60b0b1bd82cac475e63cdb8c451647182b)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
---
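Note (kept out of the commit message proper): the invariant at stake is
that a request's recorded priority matches the priolist it sits on.
__unwind_incomplete_requests() moved the boosted request onto the
NEWCLIENT priolist without updating sched.attr.priority, so a later
dequeue could compare against the stale priority and decide to preempt
again, indefinitely. A minimal sketch of the fixed unwind step, using
the names from the first hunk below:

	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
		prio |= I915_PRIORITY_NEWCLIENT;
		/* Record the boost so rq_prio() agrees with the list. */
		active->sched.attr.priority = prio;
		list_move_tail(&active->sched.link,
			       i915_sched_lookup_priolist(engine, prio));
	}

The GEM_BUG_ON() added in execlists_dequeue() then asserts that no
request consumed from the priority queue can still justify preempting
the last request we submitted.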
 drivers/gpu/drm/i915/intel_lrc.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4796f40a6d4f..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	 */
 	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
 		prio |= I915_PRIORITY_NEWCLIENT;
+		active->sched.attr.priority = prio;
 		list_move_tail(&active->sched.link,
 			       i915_sched_lookup_priolist(engine, prio));
 	}
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
+			GEM_BUG_ON(last &&
+				   need_preempt(engine, last, rq_prio(rq)));
+
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not