Merge tag 'drm-intel-fixes-2020-03-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc6:
- hard lockup fix
- GVT fixes
- 32-bit alignment issue fix
- timeline wait fixes
- cacheline_retire and free

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87lfo6ksvw.fsf@intel.com
commit f31d83f047
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
         if (unlikely(entry->flags & eb->invalid_flags))
                 return -EINVAL;
 
-        if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+        if (unlikely(entry->alignment &&
+                     !is_power_of_2_u64(entry->alignment)))
                 return -EINVAL;
 
         /*
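Note: this hunk is eb_validate_vma() in i915_gem_execbuffer.c. The generic is_power_of_2() takes an unsigned long, which is only 32 bits wide on 32-bit kernels, so a 64-bit alignment value was silently truncated before the check; the patch switches to the u64-aware helper added in i915_utils.h (final hunk below). A minimal standalone sketch of the failure mode (names are illustrative, not from the patch):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Behaves like is_power_of_2() built with a 32-bit unsigned long. */
    static bool is_power_of_2_32(uint32_t n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    /* The u64-safe variant the patch introduces. */
    static bool is_power_of_2_u64(uint64_t n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            /* Not a power of two, but its low 32 bits (8) are. */
            uint64_t alignment = (1ULL << 32) | 8;

            assert(is_power_of_2_32((uint32_t)alignment)); /* truncated: wrongly passes */
            assert(!is_power_of_2_u64(alignment));         /* correctly rejected */
            return 0;
    }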
@@ -1679,11 +1679,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
         if (!intel_engine_has_timeslices(engine))
                 return false;
 
-        if (list_is_last(&rq->sched.link, &engine->active.requests))
-                return false;
-
-        hint = max(rq_prio(list_next_entry(rq, sched.link)),
-                   engine->execlists.queue_priority_hint);
+        hint = engine->execlists.queue_priority_hint;
+        if (!list_is_last(&rq->sched.link, &engine->active.requests))
+                hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
         return hint >= effective_prio(rq);
 }
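Note: need_timeslice() in intel_lrc.c previously returned false whenever the request was the last one in engine->active.requests, ignoring engine->execlists.queue_priority_hint entirely. The rewrite starts from the queue hint and only folds in the next request's priority when a next request exists, so a higher-priority queue can still earn a timeslice.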
@@ -1725,6 +1723,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
         set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+        struct intel_engine_execlists *execlists = &engine->execlists;
+
+        execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+        if (timer_pending(&execlists->timer))
+                return;
+
+        set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
         (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
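Note: the new start_timeslice() helper records switch_priority_hint and arms the timeslice timer only when it is not already pending; the next two hunks replace the open-coded timer setup in execlists_dequeue() with calls to it.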
@@ -1888,11 +1898,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                          * Even if ELSP[1] is occupied and not worthy
                          * of timeslices, our queue might be.
                          */
-                        if (!execlists->timer.expires &&
-                            need_timeslice(engine, last))
-                                set_timer_ms(&execlists->timer,
-                                             timeslice(engine));
-
+                        start_timeslice(engine);
                         return;
                 }
         }
@@ -1927,7 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                         if (last && !can_merge_rq(last, rq)) {
                                 spin_unlock(&ve->base.active.lock);
-                                return; /* leave this for another */
+                                start_timeslice(engine);
+                                return; /* leave this for another sibling */
                         }
 
                         ENGINE_TRACE(engine,
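Note: in this virtual-engine path, the early return that leaves the request for another sibling previously never armed the timeslice timer, so if no other event arrived nothing would ever re-evaluate the queue; calling start_timeslice() before returning appears to be part of the hard-lockup fix named in the tag.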
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
 
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
+        if (!i915_active_acquire_if_busy(&cl->active)) {
+                __idle_cacheline_free(cl);
+                return;
+        }
+
         GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
         cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 
-        if (i915_active_is_idle(&cl->active))
-                __idle_cacheline_free(cl);
+        i915_active_release(&cl->active);
 }
 
 int intel_timeline_init(struct intel_timeline *timeline,
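Note: cacheline_free() in intel_timeline.c raced with retirement: it set the CACHELINE_FREE bit and then tested i915_active_is_idle(), so a cacheline that went idle between the two steps was never freed. The rewrite first takes a reference with i915_active_acquire_if_busy(), frees immediately when that fails (already idle), and otherwise marks the cacheline and drops the reference so the final release performs the deferred free. A simplified userspace model of that acquire-if-busy pattern, using C11 atomics (illustrative only, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct cacheline {
            atomic_int refs;          /* stand-in for the i915_active count */
            atomic_bool marked_free;
    };

    /* Take a reference only if the object is still busy (refs > 0). */
    static bool acquire_if_busy(struct cacheline *cl)
    {
            int old = atomic_load(&cl->refs);
            while (old > 0) {
                    if (atomic_compare_exchange_weak(&cl->refs, &old, old + 1))
                            return true;
            }
            return false;
    }

    static void release(struct cacheline *cl)
    {
            /* The last reference performs the deferred free. */
            if (atomic_fetch_sub(&cl->refs, 1) == 1 &&
                atomic_load(&cl->marked_free))
                    free(cl);
    }

    static void cacheline_free(struct cacheline *cl)
    {
            if (!acquire_if_busy(cl)) {
                    free(cl);                     /* already idle: free now */
                    return;
            }
            atomic_store(&cl->marked_free, true); /* defer to last release() */
            release(cl);                          /* drop the ref we took */
    }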
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
         /* TODO: add more platforms support */
-        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+            IS_COFFEELAKE(dev_priv)) {
                 if (connected) {
                         vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                 SFUSE_STRAP_DDID_DETECTED;
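Note: this extends GVT-g's emulated monitor hotplug to Coffee Lake, which uses the same SFUSE_STRAP-based detection as Skylake and Kaby Lake.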
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
         /* there's features depending on version! */
         v->header.version = 155;
         v->header.header_size = sizeof(v->header);
-        v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+        v->header.vbt_size = sizeof(struct vbt);
         v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
         strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
         v->bdb_header.version = 186; /* child_dev_size = 33 */
         v->bdb_header.header_size = sizeof(v->bdb_header);
 
-        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-                - sizeof(struct bdb_header);
+        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 
         /* general features */
         v->general_features_header.id = BDB_GENERAL_FEATURES;
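Note: the emulated VBT advertised sizes that were too small: vbt_size should cover the entire VBT including its own header, and bdb_size should cover the BDB header plus all BDB data, but both had a header subtracted out. A standalone check of the corrected arithmetic with mock structures (layout and field names are simplified stand-ins, not the real definitions):

    #include <assert.h>
    #include <stdint.h>

    struct vbt_header { uint16_t version, header_size, vbt_size, bdb_offset; };
    struct bdb_header { uint16_t version, header_size, bdb_size; };
    struct vbt {
            struct vbt_header header;
            struct bdb_header bdb_header;
            uint8_t           bdb_data[128];
    };

    int main(void)
    {
            /* vbt_size spans the whole blob, header included ... */
            uint16_t vbt_size = sizeof(struct vbt);
            /* ... and bdb_size spans the BDB header plus its data. */
            uint16_t bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);

            assert(vbt_size == sizeof(struct vbt_header) + bdb_size);
            assert(bdb_size == sizeof(struct bdb_header) + 128);
            return 0;
    }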
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
 
-        mutex_lock(&vgpu->vgpu_lock);
-
         WARN(vgpu->active, "vGPU is still active!\n");
 
+        /*
+         * remove idr first so later clean can judge if need to stop
+         * service if no active vgpu.
+         */
+        mutex_lock(&gvt->lock);
+        idr_remove(&gvt->vgpu_idr, vgpu->id);
+        mutex_unlock(&gvt->lock);
+
+        mutex_lock(&vgpu->vgpu_lock);
         intel_gvt_debugfs_remove_vgpu(vgpu);
         intel_vgpu_clean_sched_policy(vgpu);
         intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
         mutex_unlock(&vgpu->vgpu_lock);
 
         mutex_lock(&gvt->lock);
-        idr_remove(&gvt->vgpu_idr, vgpu->id);
         if (idr_is_empty(&gvt->vgpu_idr))
                 intel_gvt_clean_irq(gvt);
         intel_gvt_update_vgpu_types(gvt);
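Note: intel_gvt_destroy_vgpu() now removes the vGPU from gvt->vgpu_idr before tearing it down instead of after, per the new comment, so anything scanning the idr (for example, to decide whether the scheduler service can stop) no longer sees a half-destroyed vGPU. gvt->lock is taken and released before vgpu->vgpu_lock, keeping the lock ordering simple.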
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
         return NOTIFY_DONE;
 }
 
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+        struct i915_request *rq =
+                container_of(wrk, typeof(*rq), semaphore_work);
+
+        i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+        i915_request_put(rq);
+}
+
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-        struct i915_request *request =
-                container_of(fence, typeof(*request), semaphore);
+        struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 
         switch (state) {
         case FENCE_COMPLETE:
-                i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+                if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+                        i915_request_get(rq);
+                        init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+                        irq_work_queue(&rq->semaphore_work);
+                }
                 break;
 
         case FENCE_FREE:
-                i915_request_put(request);
+                i915_request_put(rq);
                 break;
         }
 
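Note: semaphore_notify() runs from the fence-signaling path, where calling i915_schedule_bump_priority() directly can contend with scheduler locks already held by the caller; the priority bump is now deferred to irq_work context via the new rq->semaphore_work (declared in the i915_request.h hunks below), and skipped when the request already carries I915_PRIORITY_NOSEMAPHORE. The extra i915_request_get() keeps the request alive until irq_semaphore_cb() drops it; init_irq_work() and irq_work_queue() are the standard <linux/irq_work.h> API.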
@@ -776,8 +788,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
         struct dma_fence *fence;
         int err;
 
-        GEM_BUG_ON(i915_request_timeline(rq) ==
-                   rcu_access_pointer(signal->timeline));
+        if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+                return 0;
 
         if (i915_request_started(signal))
                 return 0;
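Note: i915_request_await_start() used to assert (GEM_BUG_ON) that rq and signal were on different timelines; it now treats a same-timeline await as a no-op, since requests on a single timeline are already ordered.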
@@ -821,7 +833,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                 return 0;
 
         err = 0;
-        if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+        if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
                 err = i915_sw_fence_await_dma_fence(&rq->submit,
                                                     fence, 0,
                                                     I915_FENCE_GFP);
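Note: the sync-point test was inverted. intel_timeline_sync_is_later() returns true when the timeline has already synced past the fence, i.e. when no wait is needed, yet the old code set up the await exactly in that case and skipped it otherwise. Negating the condition makes the request wait only when it actually has to, one of the "timeline wait fixes" in the tag.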
@@ -1318,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
          * decide whether to preempt the entire chain so that it is ready to
          * run at the earliest possible convenience.
          */
-        i915_sw_fence_commit(&rq->semaphore);
         if (attr && rq->engine->schedule)
                 rq->engine->schedule(rq, attr);
+        i915_sw_fence_commit(&rq->semaphore);
         i915_sw_fence_commit(&rq->submit);
 }
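Note: committing the semaphore fence before calling engine->schedule() meant the semaphore completion (and, with the change above, its deferred priority bump) could run before the request's requested priority had been applied; moving the commit after the schedule call appears to close that window.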
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 
 #include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
         };
         struct list_head execute_cb;
         struct i915_sw_fence semaphore;
+        struct irq_work semaphore_work;
 
         /*
          * A list of everyone we wait upon, and everyone who waits upon us.
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
         __idx;                                                          \
 })
 
+static inline bool is_power_of_2_u64(u64 n)
+{
+        return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 static inline void __list_del_many(struct list_head *head,
                                    struct list_head *first)
 {
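Note: is_power_of_2_u64() relies on the usual bit trick: a power of two has exactly one bit set, so n & (n - 1) clears the lowest set bit and yields zero only for powers of two (n == 0 is excluded explicitly; e.g. 8 & 7 == 0 but 12 & 11 == 8). This is the helper the execbuffer hunk at the top switches to.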