Merge tag 'drm-intel-fixes-2020-04-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix selftest refcnt leak (Xiyu)
- Fix gem vma lock (Chris)
- Fix gt's i915_request.timeline acquire by checking if cacheline is valid (Chris)
- Fix IRQ postinstall fault masks (Matt)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430140042.GA270140@intel.com
commit a979bb700a (Dave Airlie, 2020-05-01 10:56:13 +10:00)
5 changed files with 38 additions and 16 deletions

drivers/gpu/drm/i915/gem/i915_gem_tiling.c

@@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
 			      int tiling_mode, unsigned int stride)
 {
 	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *vn;
+	LIST_HEAD(unbind);
 	int ret = 0;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return 0;
 
 	mutex_lock(&ggtt->vm.mutex);
+
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
+		GEM_BUG_ON(vma->vm != &ggtt->vm);
+
 		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
 			continue;
 
-		ret = __i915_vma_unbind(vma);
-		if (ret)
-			break;
+		list_move(&vma->vm_link, &unbind);
 	}
+	spin_unlock(&obj->vma.lock);
+
+	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
+		ret = __i915_vma_unbind(vma);
+		if (ret) {
+			/* Restore the remaining vma on an error */
+			list_splice(&unbind, &ggtt->vm.bound_list);
+			break;
+		}
+	}
+
 	mutex_unlock(&ggtt->vm.mutex);
 
 	return ret;
@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	}
 	mutex_unlock(&obj->mm.lock);
 
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		vma->fence_size =
 			i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		if (vma->fence)
 			vma->fence->dirty = true;
 	}
+	spin_unlock(&obj->vma.lock);
 
 	obj->tiling_and_stride = tiling | stride;
 	i915_gem_object_unlock(obj);
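
The structural change above is a common kernel shape: __i915_vma_unbind() is too heavy to run under the spinlock that protects the vma list, so candidates are first moved onto a private list under the lock and the unbinding happens afterwards, with the leftovers spliced back on error. Below is a minimal userspace sketch of that two-pass pattern. All names (node, bound_list, unbind_one) are invented, a single pthread mutex stands in for the vma.lock spinlock, and unlike the driver no outer mutex is held across pass 2; it illustrates the pattern, not the i915 code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	int needs_unbind;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *bound_list;		/* protected by list_lock */

/* Stand-in for __i915_vma_unbind(): too slow to run under list_lock. */
static int unbind_one(struct node *n)
{
	printf("unbinding node %d\n", n->id);
	return 0;			/* non-zero aborts the walk */
}

static int prepare_all(void)
{
	struct node *n, *next, *unbind = NULL;
	struct node **pp;
	int ret = 0;

	/* Pass 1: pick candidates while holding the lock. */
	pthread_mutex_lock(&list_lock);
	for (pp = &bound_list; (n = *pp); ) {
		if (n->needs_unbind) {
			*pp = n->next;		/* unlink from bound_list */
			n->next = unbind;	/* push onto the private list */
			unbind = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* Pass 2: the slow work runs with the lock dropped. */
	for (n = unbind; n; n = next) {
		next = n->next;
		ret = unbind_one(n);
		if (ret) {
			/* On error, splice the leftovers back (cf. list_splice). */
			pthread_mutex_lock(&list_lock);
			while (n) {
				next = n->next;
				n->next = bound_list;
				bound_list = n;
				n = next;
			}
			pthread_mutex_unlock(&list_lock);
			break;
		}
		free(n);
	}
	return ret;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->needs_unbind = i & 1;
		n->next = bound_list;
		bound_list = n;
	}
	return prepare_all();
}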

drivers/gpu/drm/i915/gem/selftests/huge_pages.c

@@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	unsigned int page_size = BIT(first);
 
 	obj = i915_gem_object_create_internal(dev_priv, page_size);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_vm;
+	}
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
@@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	}
 
 	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_vm;
+	}
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
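
The selftest leak is the classic single-exit-path problem: igt_ppgtt_pin_update() takes a reference on the address space near the top, and the early "return PTR_ERR(obj)" paths skipped the out_vm cleanup that drops it. A toy sketch of the same shape, with invented names (vm_get, vm_put, run_test) and a plain counter in place of a real kref:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct vm { int refcount; };

static struct vm *vm_get(struct vm *vm) { vm->refcount++; return vm; }
static void vm_put(struct vm *vm) { vm->refcount--; }

static int run_test(struct vm *shared_vm, int fail_alloc)
{
	struct vm *vm = vm_get(shared_vm);	/* reference taken up front */
	void *obj;
	int err = 0;

	obj = fail_alloc ? NULL : malloc(64);
	if (!obj) {
		err = -ENOMEM;
		goto out_vm;	/* NOT "return err": that would leak vm */
	}

	/* ... exercise obj ... */
	free(obj);
out_vm:
	vm_put(vm);		/* reference dropped on every path */
	return err;
}

int main(void)
{
	struct vm shared = { .refcount = 0 };

	run_test(&shared, 1);
	printf("refcount after failed run: %d\n", shared.refcount); /* 0 */
	return 0;
}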

drivers/gpu/drm/i915/gt/intel_timeline.c

@@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 	rcu_read_lock();
 	cl = rcu_dereference(from->hwsp_cacheline);
+	if (i915_request_completed(from)) /* confirm cacheline is valid */
+		goto unlock;
 	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
 		goto unlock; /* seqno wrapped and completed! */
 	if (unlikely(i915_request_completed(from)))
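
The timeline fix is an ordering problem on the RCU read side: rcu_dereference() can legitimately return a cacheline whose request has already completed, so its seqno slot may have wrapped and been reused, and acquiring it first would validate stale data. The sketch below mimics that check, acquire, re-check shape with C11 atomics; acquire_if_busy and read_hwsp are invented names, and the re-check after acquire is the part that already existed in i915, while the early check is what the hunk adds.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cacheline { atomic_int active; };

struct request {
	atomic_bool completed;
	struct cacheline *cl;
};

/* Take a reference only if someone still holds one
 * (cf. i915_active_acquire_if_busy). */
static bool acquire_if_busy(struct cacheline *cl)
{
	int old = atomic_load(&cl->active);

	while (old > 0)
		if (atomic_compare_exchange_weak(&cl->active, &old, old + 1))
			return true;
	return false;
}

static bool read_hwsp(struct request *from)
{
	/* The fix: bail out before acquiring if the request already
	 * completed, confirming the cacheline is still worth reading. */
	if (atomic_load(&from->completed))
		return false;

	if (!acquire_if_busy(from->cl))
		return false;

	/* Re-check after acquiring: it may have completed in between. */
	if (atomic_load(&from->completed)) {
		atomic_fetch_sub(&from->cl->active, 1);
		return false;
	}

	/* ... safe to read the seqno here ... */
	atomic_fetch_sub(&from->cl->active, 1);
	return true;
}

int main(void)
{
	struct cacheline cl = { .active = 1 };
	struct request rq = { .completed = false, .cl = &cl };

	printf("read ok: %d\n", read_hwsp(&rq));
	return 0;
}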

drivers/gpu/drm/i915/i915_irq.c

@@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+		GEN8_PIPE_CDCLK_CRC_DONE;
 	u32 de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
@@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 		de_misc_masked |= GEN8_DE_MISC_GSE;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 				  GEN9_AUX_CHANNEL_D;
 		if (IS_GEN9_LP(dev_priv))
 			de_port_masked |= BXT_DE_PORT_GMBUS;
-	} else {
-		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
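
The IRQ fix drops the open-coded gen8/gen9 fault bits from the postinstall path and uses the driver's gen8_de_pipe_fault_mask() helper instead, so postinstall and the interrupt handler can no longer disagree about which fault bits a platform has (the open-coded if/else had fallen out of sync with newer generations). A toy sketch of that single-source-of-truth refactor; the bit values and names below are invented placeholders, not the real register definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values: NOT the real i915 register definitions. */
#define FAULT_ERRORS_GEN8	0x000000ffu
#define FAULT_ERRORS_GEN9	0x0000ff00u
#define FAULT_ERRORS_GEN11	0x00ffff00u
#define CDCLK_CRC_DONE		0x10000000u

/* One helper owns the per-generation mapping
 * (cf. gen8_de_pipe_fault_mask()). */
static uint32_t de_pipe_fault_mask(int gen)
{
	if (gen >= 11)
		return FAULT_ERRORS_GEN11;
	else if (gen >= 9)
		return FAULT_ERRORS_GEN9;
	else
		return FAULT_ERRORS_GEN8;
}

/* After the fix, postinstall asks the helper instead of open-coding
 * the per-generation bits in a second place. */
static uint32_t postinstall_pipe_mask(int gen)
{
	return de_pipe_fault_mask(gen) | CDCLK_CRC_DONE;
}

int main(void)
{
	for (int gen = 8; gen <= 12; gen++)
		printf("gen%d pipe mask: 0x%08x\n", gen,
		       postinstall_pipe_mask(gen));
	return 0;
}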

drivers/gpu/drm/i915/i915_vma.c

@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
 
+	spin_lock(&obj->vma.lock);
+
 	if (i915_is_ggtt(vm)) {
 		if (unlikely(overflows_type(vma->size, u32)))
-			goto err_vma;
+			goto err_unlock;
 
 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
 						      i915_gem_object_get_tiling(obj),
 						      i915_gem_object_get_stride(obj));
 		if (unlikely(vma->fence_size < vma->size || /* overflow */
 			     vma->fence_size > vm->total))
-			goto err_vma;
+			goto err_unlock;
 
 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
 
@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 	}
 
-	spin_lock(&obj->vma.lock);
-
 	rb = NULL;
 	p = &obj->vma.tree.rb_node;
 	while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	return vma;
 
+err_unlock:
+	spin_unlock(&obj->vma.lock);
 err_vma:
 	i915_vma_free(vma);
 	return ERR_PTR(-E2BIG);
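
vma_create() now takes obj->vma.lock before the fence-size checks instead of after them, so the error paths inside the widened locked region need an err_unlock label stacked directly above the old err_vma label: cleanups then run in reverse order of acquisition on every exit. A skeleton of that unwind-label idiom, with invented names and a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
};

static int create_thing(struct obj *obj, size_t size)
{
	int *thing = malloc(sizeof(*thing));
	int err;

	if (!thing)
		return -ENOMEM;

	if (size == 0) {
		err = -EINVAL;
		goto err_free;		/* before the lock: no unlock needed */
	}

	pthread_mutex_lock(&obj->lock);	/* lock now covers the checks below */

	if (size > 0x1000) {
		err = -E2BIG;
		goto err_unlock;	/* was "goto err_free" before the
					 * lock moved earlier */
	}

	/* ... publish thing under the lock ... */

	pthread_mutex_unlock(&obj->lock);
	return 0;

err_unlock:				/* stacked above the old label, */
	pthread_mutex_unlock(&obj->lock);
err_free:				/* so cleanups unwind in reverse */
	free(thing);
	return err;
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	return create_thing(&o, 64) ? 1 : 0;
}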