Merge tag 'drm-intel-next-fixes-2021-04-27' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 fixes for v5.13-rc1:
- Several fixes to GLK handling in recent display refactoring (Ville)
- Rare watchdog timer race fix (Tvrtko)
- Cppcheck redundant condition fix (José)
- Overlay error code propagation fix (Dan Carpenter)
- Documentation fix (Maarten)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/871raw5d3g.fsf@intel.com
commit 1cd6b4a04f
@@ -1403,7 +1403,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
-               if (IS_DISPLAY_VER(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+               if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+                   color_plane == 0 && fb->width > 3840)
                        tile_width *= 4;
                /*
                 * The main surface pitch must be padded to a multiple of four
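For context on the workaround above: the comment in the hunk says the main surface pitch must be padded to a multiple of four tile widths on the affected platforms when the plane is wider than 3840 pixels, which is what the tile_width *= 4 accomplishes. A rough, self-contained sketch of that padding arithmetic, with a round-up helper in the spirit of the kernel's ALIGN() and invented numbers:

/* Illustrative only: pad a stride up to the required alignment, as the
 * comment in the hunk above describes. All values are made up. */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned int tile_width = 128;   /* bytes per tile row, illustrative */
        unsigned int stride = 15400;     /* requested stride in bytes, illustrative */

        /* wide plane on the affected platforms: alignment becomes 4 tile widths */
        unsigned int alignment = tile_width * 4;

        printf("padded stride: %u bytes\n", ALIGN_UP(stride, alignment));
        return 0;
}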
@@ -96,7 +96,7 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
         * Detecting LTTPRs must be avoided on platforms with an AUX timeout
         * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
         */
-       if (DISPLAY_VER(i915) < 10)
+       if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
                return false;

        if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
@@ -597,7 +597,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
                return false;

        /* Display WA #1105: skl,bxt,kbl,cfl,glk */
-       if (IS_DISPLAY_VER(dev_priv, 9) &&
+       if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
            modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
                return false;

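A side note on the test itself: stride & 511 is the usual bit trick for "stride is not a multiple of 512", which is why the workaround above rejects such strides for linear framebuffers on these platforms. A throwaway check, illustrative only:

/* stride & 511 is non-zero exactly when stride is not a multiple of 512. */
#include <stdio.h>

int main(void)
{
        unsigned int strides[] = { 512, 1024, 1536, 1000, 513 };

        for (unsigned int i = 0; i < sizeof(strides) / sizeof(strides[0]); i++)
                printf("stride %u: %s\n", strides[i],
                       (strides[i] & 511) ? "rejected (not a 512 multiple)"
                                          : "allowed");
        return 0;
}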
@@ -803,8 +803,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        vma = intel_overlay_pin_fb(new_bo);
-       if (IS_ERR(vma))
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
                goto out_pin_section;
+       }

        i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);

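The overlay fix above is a textbook instance of the kernel's pointer-encoded error idiom: a helper returns either a valid pointer or ERR_PTR(-errno), and the caller tests it with IS_ERR() and recovers the errno with PTR_ERR() before jumping to the unwind label, so the real error code propagates instead of being lost. A minimal self-contained sketch of the idiom in userspace C; the ERR_PTR/IS_ERR/PTR_ERR stand-ins mirror include/linux/err.h, and pin_buffer()/do_put_image() are made-up names:

/* Minimal userspace sketch of the ERR_PTR/PTR_ERR error-propagation idiom. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *pin_buffer(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */
        return malloc(64);
}

static int do_put_image(int fail)
{
        int ret = 0;
        void *vma = pin_buffer(fail);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);     /* propagate the real error code ... */
                goto out;               /* ... instead of leaving ret stale */
        }

        /* ... use vma ... */
        free(vma);
out:
        return ret;
}

int main(void)
{
        printf("ok path: %d\n", do_put_image(0));
        printf("error path: %d\n", do_put_image(1));
        return 0;
}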
@@ -1519,8 +1519,7 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
                u32 psr_status;

                mutex_lock(&intel_dp->psr.lock);
-               if (!intel_dp->psr.enabled ||
-                   (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
+               if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
                        mutex_unlock(&intel_dp->psr.lock);
                        continue;
                }
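The cppcheck finding behind this change is a pure boolean simplification: !a || (a && b) is equivalent to !a || b, so the redundant intel_dp->psr.enabled test can be dropped without changing behaviour. A throwaway truth-table check, if you want to convince yourself:

/* Verify that !a || (a && b) equals !a || b for all boolean a, b. */
#include <stdio.h>

int main(void)
{
        for (int a = 0; a <= 1; a++)
                for (int b = 0; b <= 1; b++)
                        printf("a=%d b=%d: old=%d new=%d\n",
                               a, b, !a || (a && b), !a || b);
        return 0;
}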
@@ -70,6 +70,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,

 /**
  * i915_gem_shrink - Shrink buffer object caches
+ * @ww: i915 gem ww acquire ctx, or NULL
  * @i915: i915 device
  * @target: amount of memory to make available, in pages
  * @nr_scanned: optional output for number of pages scanned (incremental)
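The documentation fix simply adds the missing @ww: line; kernel-doc expects every parameter of the documented function to have a matching "@name:" description and scripts/kernel-doc warns when one is absent. A minimal sketch of the comment layout, using a hypothetical function name:

/**
 * my_shrink - hypothetical example of a complete kernel-doc header
 * @ww: optional acquire context, or NULL
 * @target: amount of memory to make available, in pages
 *
 * Every parameter gets its own "@name:" line; omitting one (as @ww was
 * omitted before the fix above) triggers a kernel-doc warning.
 */
int my_shrink(void *ww, unsigned long target)
{
        return 0;
}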
@@ -941,11 +941,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,

        /* below are all lri handlers */
        vreg = &vgpu_vreg(s->vgpu, offset);
-       if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
-               gvt_vgpu_err("%s access to non-render register (%x)\n",
-                               cmd, offset);
-               return -EBADRQC;
-       }

        if (is_cmd_update_pdps(offset, s) &&
            cmd_pdp_mmio_update_handler(s, offset, index))
@@ -587,12 +587,6 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
                        entry, index, false, 0, mm->vgpu);
 }

-static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
-               struct intel_gvt_gtt_entry *entry, unsigned long index)
-{
-       _ppgtt_set_root_entry(mm, entry, index, true);
-}
-
 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
                struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
@@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
        return true;
 }

-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
 {
        int i, j;
        struct intel_vgpu_type *type;
@@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
                gvt_vgpu_type_groups[i] = group;
        }

-       return true;
+       return 0;

 unwind:
        for (j = 0; j < i; j++) {
@@ -152,7 +152,7 @@ unwind:
                kfree(group);
        }

-       return false;
+       return -ENOMEM;
 }

 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -373,7 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
                goto out_clean_thread;

        ret = intel_gvt_init_vgpu_type_groups(gvt);
-       if (ret == false) {
+       if (ret) {
                gvt_err("failed to init vgpu type groups: %d\n", ret);
                goto out_clean_types;
        }
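The gvt change above swaps a bool return for the usual kernel convention of 0 on success and a negative errno on failure, which is what makes the caller's plain if (ret) test and the %d in the error message meaningful. A minimal sketch of that convention in userspace C, with an invented init_groups() stand-in:

/* Sketch of the 0 / -errno return convention the change adopts. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int init_groups(int fail)
{
        void *group = fail ? NULL : malloc(32);

        if (!group)
                return -ENOMEM;         /* failure: negative errno */
        free(group);
        return 0;                       /* success: zero */
}

int main(void)
{
        int ret = init_groups(1);

        if (ret)                        /* any non-zero value means failure */
                fprintf(stderr, "failed to init groups: %d\n", ret);
        return ret ? 1 : 0;
}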
@@ -350,6 +350,8 @@ static void __rq_arm_watchdog(struct i915_request *rq)
        if (!ce->watchdog.timeout_us)
                return;

+       i915_request_get(rq);
+
        hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        wdg->timer.function = __rq_watchdog_expired;
        hrtimer_start_range_ns(&wdg->timer,
@@ -357,7 +359,6 @@ static void __rq_arm_watchdog(struct i915_request *rq)
                               NSEC_PER_USEC),
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
-       i915_request_get(rq);
 }

 static void __rq_cancel_watchdog(struct i915_request *rq)
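The two hunks above are the watchdog race fix: the reference on the request is now taken before the hrtimer is armed, presumably so that an expiry callback firing immediately after hrtimer_start_range_ns() can never drop or rely on a reference the arming path has not taken yet. A highly simplified, single-threaded sketch of that ordering invariant, with stand-in types and names only:

/* Illustrative only: the expiry handler is modelled as running synchronously
 * right after the timer is armed, which is the worst case for the ordering. */
#include <stdio.h>

struct req { int refcount; };

static void get(struct req *r) { r->refcount++; }
static void put(struct req *r) { r->refcount--; }

/* Stand-in for the expiry callback: may run as soon as the timer is armed,
 * and drops the reference the arming path is expected to hold. */
static void watchdog_expired(struct req *r)
{
        put(r);
}

static void arm_watchdog(struct req *r)
{
        get(r);                 /* take the reference first ...              */
        watchdog_expired(r);    /* ... so an immediate expiry stays balanced.
                                 * With the old ordering (arm before get) the
                                 * count could momentarily reach zero here.  */
}

int main(void)
{
        struct req r = { .refcount = 1 };

        arm_watchdog(&r);
        printf("refcount after arm + expiry: %d\n", r.refcount); /* still 1 */
        return 0;
}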