Merge tag 'drm-intel-next-fixes-2016-10-11' of git://anongit.freedesktop.org/drm-intel into drm-next

A big bunch of i915 fixes for drm-next / v4.9 merge window, with more than
half of them also cc: stable. We also continue to have more Fixes:
annotations for our fixes, which should help the backporters and
archeologists.

* tag 'drm-intel-next-fixes-2016-10-11' of git://anongit.freedesktop.org/drm-intel: (27 commits)
  drm/i915: Fix conflict resolution from backmerge of v4.8-rc8 to drm-next
  drm/i915/guc: Unwind GuC workqueue reservation if request construction fails
  drm/i915: Reset the breadcrumbs IRQ more carefully
  drm/i915: Force relocations via cpu if we run out of idle aperture
  drm/i915: Distinguish last emitted request from last submitted request
  drm/i915: Allow DP to work w/o EDID
  drm/i915: Move long hpd handling into the hotplug work
  drm/i915/execlists: Reinitialise context image after GPU hang
  drm/i915: Use correct index for backtracking HUNG semaphores
  drm/i915: Unalias obj->phys_handle and obj->userptr
  drm/i915: Just clear the mmiodebug before a register access
  drm/i915/gen9: only add the planes actually affected by ddb changes
  drm/i915: Allow PCH DPLL sharing regardless of DPLL_SDVO_HIGH_SPEED
  drm/i915/bxt: Fix HDMI DPLL configuration
  drm/i915/gen9: fix the watermark res_blocks value
  drm/i915/gen9: fix plane_blocks_per_line on watermarks calculations
  drm/i915/gen9: minimum scanlines for Y tile is not always 4
  drm/i915/gen9: fix the WaWmMemoryReadLatency implementation
  drm/i915/kbl: KBL also needs to run the SAGV code
  drm/i915: introduce intel_has_sagv()
  ...
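A note on the "Fixes:" trailers mentioned above: the kernel convention is a commit footer naming the commit that introduced the regression, which lets stable maintainers match backports automatically. A purely illustrative footer (the hash and subject below are hypothetical, not taken from any commit in this merge):

    Fixes: 0123456789ab ("drm/i915: hypothetical change that introduced the bug")
    Cc: <stable@vger.kernel.org>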
commit 28da9ed657
@@ -1786,15 +1786,6 @@ void i915_reset(struct drm_i915_private *dev_priv)
                 goto error;
         }
 
-        /*
-         * rps/rc6 re-init is necessary to restore state lost after the
-         * reset and the re-install of gt irqs. Skip for ironlake per
-         * previous concerns that it doesn't respond well to some forms
-         * of re-init after reset.
-         */
-        intel_sanitize_gt_powersave(dev_priv);
-        intel_autoenable_gt_powersave(dev_priv);
-
 wakeup:
         wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
         return;
@@ -1872,7 +1863,17 @@ static int i915_pm_resume(struct device *kdev)
 /* freeze: before creating the hibernation_image */
 static int i915_pm_freeze(struct device *kdev)
 {
-        return i915_pm_suspend(kdev);
+        int ret;
+
+        ret = i915_pm_suspend(kdev);
+        if (ret)
+                return ret;
+
+        ret = i915_gem_freeze(kdev_to_i915(kdev));
+        if (ret)
+                return ret;
+
+        return 0;
 }
 
 static int i915_pm_freeze_late(struct device *kdev)
@@ -1984,11 +1984,11 @@ struct drm_i915_private {
         struct vlv_s0ix_state vlv_s0ix_state;
 
         enum {
-                I915_SKL_SAGV_UNKNOWN = 0,
-                I915_SKL_SAGV_DISABLED,
-                I915_SKL_SAGV_ENABLED,
-                I915_SKL_SAGV_NOT_CONTROLLED
-        } skl_sagv_status;
+                I915_SAGV_UNKNOWN = 0,
+                I915_SAGV_DISABLED,
+                I915_SAGV_ENABLED,
+                I915_SAGV_NOT_CONTROLLED
+        } sagv_status;
 
         struct {
                 /*
@@ -2276,21 +2276,19 @@ struct drm_i915_gem_object {
         /** Record of address bit 17 of each page at last unbind. */
         unsigned long *bit_17;
 
-        union {
-                /** for phy allocated objects */
-                struct drm_dma_handle *phys_handle;
-
-                struct i915_gem_userptr {
-                        uintptr_t ptr;
-                        unsigned read_only :1;
-                        unsigned workers :4;
+        struct i915_gem_userptr {
+                uintptr_t ptr;
+                unsigned read_only :1;
+                unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                        struct i915_mm_struct *mm;
-                        struct i915_mmu_object *mmu_object;
-                        struct work_struct *work;
-                } userptr;
-        };
+                struct i915_mm_struct *mm;
+                struct i915_mmu_object *mmu_object;
+                struct work_struct *work;
+        } userptr;
+
+        /** for phys allocated objects */
+        struct drm_dma_handle *phys_handle;
 };
 
 static inline struct drm_i915_gem_object *
@@ -3076,6 +3074,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 void *i915_gem_object_alloc(struct drm_device *dev);
@@ -2616,8 +2616,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
         list_for_each_entry_continue(request, &engine->request_list, link)
                 if (request->ctx == incomplete_ctx)
                         reset_request(request);
-
-        engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
@@ -2628,9 +2626,15 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
         for_each_engine(engine, dev_priv)
                 i915_gem_reset_engine(engine);
-        mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
+
         i915_gem_restore_fences(&dev_priv->drm);
 
+        if (dev_priv->gt.awake) {
+                intel_sanitize_gt_powersave(dev_priv);
+                intel_enable_gt_powersave(dev_priv);
+                if (INTEL_GEN(dev_priv) >= 6)
+                        gen6_rps_busy(dev_priv);
+        }
 }
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
@@ -4589,6 +4593,19 @@ void i915_gem_load_cleanup(struct drm_device *dev)
         rcu_barrier();
 }
 
+int i915_gem_freeze(struct drm_i915_private *dev_priv)
+{
+        intel_runtime_pm_get(dev_priv);
+
+        mutex_lock(&dev_priv->drm.struct_mutex);
+        i915_gem_shrink_all(dev_priv);
+        mutex_unlock(&dev_priv->drm.struct_mutex);
+
+        intel_runtime_pm_put(dev_priv);
+
+        return 0;
+}
+
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
         struct drm_i915_gem_object *obj;
@@ -4612,7 +4629,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
          * the objects as well.
          */
 
-        i915_gem_shrink_all(dev_priv);
+        mutex_lock(&dev_priv->drm.struct_mutex);
+        i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
 
         for (p = phases; *p; p++) {
                 list_for_each_entry(obj, *p, global_list) {
@@ -4620,6 +4638,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
                         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
                 }
         }
+        mutex_unlock(&dev_priv->drm.struct_mutex);
 
         return 0;
 }
@@ -451,8 +451,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                                                0, ggtt->mappable_end,
                                                DRM_MM_SEARCH_DEFAULT,
                                                DRM_MM_CREATE_DEFAULT);
-                if (ret)
-                        return ERR_PTR(ret);
+                if (ret) /* no inactive aperture space, use cpu reloc */
+                        return NULL;
         } else {
                 ret = i915_vma_put_fence(vma);
                 if (ret) {
@@ -328,6 +328,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
         switch (state) {
         case FENCE_COMPLETE:
+                request->engine->last_submitted_seqno = request->fence.seqno;
                 request->engine->submit_request(request);
                 break;
 
@@ -641,8 +642,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                                              &request->submitq);
 
         request->emitted_jiffies = jiffies;
-        request->previous_seqno = engine->last_submitted_seqno;
-        engine->last_submitted_seqno = request->fence.seqno;
+        request->previous_seqno = engine->last_pending_seqno;
+        engine->last_pending_seqno = request->fence.seqno;
         i915_gem_active_set(&engine->last_request, request);
         list_add_tail(&request->link, &engine->request_list);
         list_add_tail(&request->ring_link, &ring->request_list);
@@ -451,6 +451,18 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
         return ret;
 }
 
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
+{
+        const size_t wqi_size = sizeof(struct guc_wq_item);
+        struct i915_guc_client *gc = request->i915->guc.execbuf_client;
+
+        GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
+
+        spin_lock(&gc->wq_lock);
+        gc->wq_rsvd -= wqi_size;
+        spin_unlock(&gc->wq_lock);
+}
+
 /* Construct a Work Item and append it to the GuC's Work Queue */
 static void guc_wq_item_append(struct i915_guc_client *gc,
                                struct drm_i915_gem_request *rq)
@@ -350,6 +350,9 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+        if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+                return;
+
         spin_lock_irq(&dev_priv->irq_lock);
         WARN_ON_ONCE(dev_priv->rps.pm_iir);
         WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -368,6 +371,9 @@ u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+        if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+                return;
+
         spin_lock_irq(&dev_priv->irq_lock);
         dev_priv->rps.interrupts_enabled = false;
 
@@ -2816,7 +2822,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                         if (engine == signaller)
                                 continue;
 
-                        if (offset == signaller->semaphore.signal_ggtt[engine->id])
+                        if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
                                 return signaller;
                 }
         } else {
@@ -2826,13 +2832,13 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                         if(engine == signaller)
                                 continue;
 
-                        if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
+                        if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
                                 return signaller;
                 }
         }
 
-        DRM_DEBUG_DRIVER("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-                         engine->id, ipehr, offset);
+        DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+                         engine->name, ipehr, offset);
 
         return ERR_PTR(-ENODEV);
 }
@@ -578,6 +578,36 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
         return 0;
 }
 
+static void cancel_fake_irq(struct intel_engine_cs *engine)
+{
+        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+        del_timer_sync(&b->hangcheck);
+        del_timer_sync(&b->fake_irq);
+        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+}
+
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
+{
+        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+        cancel_fake_irq(engine);
+        spin_lock(&b->lock);
+
+        __intel_breadcrumbs_disable_irq(b);
+        if (intel_engine_has_waiter(engine)) {
+                b->timeout = wait_timeout();
+                __intel_breadcrumbs_enable_irq(b);
+                if (READ_ONCE(b->irq_posted))
+                        wake_up_process(b->first_wait->tsk);
+        } else {
+                /* sanitize the IMR and unmask any auxiliary interrupts */
+                irq_disable(engine);
+        }
+
+        spin_unlock(&b->lock);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
         struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -585,8 +615,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
         if (!IS_ERR_OR_NULL(b->signaler))
                 kthread_stop(b->signaler);
 
-        del_timer_sync(&b->hangcheck);
-        del_timer_sync(&b->fake_irq);
+        cancel_fake_irq(engine);
 }
 
 unsigned int intel_kick_waiters(struct drm_i915_private *i915)
@@ -3408,6 +3408,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
         dst_w--;
         dst_h--;
 
+        intel_crtc->dspaddr_offset = surf_addr;
+
         intel_crtc->adjusted_x = src_x;
         intel_crtc->adjusted_y = src_y;
 
@@ -3629,6 +3631,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
                 intel_runtime_pm_disable_interrupts(dev_priv);
                 intel_runtime_pm_enable_interrupts(dev_priv);
 
+                intel_pps_unlock_regs_wa(dev_priv);
                 intel_modeset_init_hw(dev);
 
                 spin_lock_irq(&dev_priv->irq_lock);
@@ -9509,6 +9512,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
         if (intel_crtc_has_dp_encoder(crtc_state))
                 dpll |= DPLL_SDVO_HIGH_SPEED;
 
+        /*
+         * The high speed IO clock is only really required for
+         * SDVO/HDMI/DP, but we also enable it for CRT to make it
+         * possible to share the DPLL between CRT and HDMI. Enabling
+         * the clock needlessly does no real harm, except use up a
+         * bit of power potentially.
+         *
+         * We'll limit this to IVB with 3 pipes, since it has only two
+         * DPLLs and so DPLL sharing is the only way to get three pipes
+         * driving PCH ports at the same time. On SNB we could do this,
+         * and potentially avoid enabling the second DPLL, but it's not
+         * clear if it's a win or loss power wise. No point in doing
+         * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+         */
+        if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+                dpll |= DPLL_SDVO_HIGH_SPEED;
+
         /* compute bitmask from p1 value */
         dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
         /* also FPA1 */
@@ -14364,8 +14385,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                  * SKL workaround: bspec recommends we disable the SAGV when we
                  * have more then one pipe enabled
                  */
-                if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
-                        skl_disable_sagv(dev_priv);
+                if (!intel_can_enable_sagv(state))
+                        intel_disable_sagv(dev_priv);
 
                 intel_modeset_verify_disabled(dev);
         }
@@ -14422,9 +14443,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
         }
 
-        if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
-            skl_can_enable_sagv(state))
-                skl_enable_sagv(dev_priv);
+        if (intel_state->modeset && intel_can_enable_sagv(state))
+                intel_enable_sagv(dev_priv);
 
         drm_atomic_helper_commit_hw_done(state);
 
@@ -4337,7 +4337,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
         intel_dp->has_audio = false;
 }
 
-static void
+static enum drm_connector_status
 intel_dp_long_pulse(struct intel_connector *intel_connector)
 {
         struct drm_connector *connector = &intel_connector->base;
@@ -4361,7 +4361,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
         else
                 status = connector_status_disconnected;
 
-        if (status != connector_status_connected) {
+        if (status == connector_status_disconnected) {
                 intel_dp->compliance_test_active = 0;
                 intel_dp->compliance_test_type = 0;
                 intel_dp->compliance_test_data = 0;
@@ -4423,8 +4423,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
                 intel_dp->aux.i2c_defer_count = 0;
 
         intel_dp_set_edid(intel_dp);
-
-        status = connector_status_connected;
+        if (is_edp(intel_dp) || intel_connector->detect_edid)
+                status = connector_status_connected;
         intel_dp->detect_done = true;
 
         /* Try to read the source of the interrupt */
@@ -4443,12 +4443,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
         }
 
 out:
-        if ((status != connector_status_connected) &&
-            (intel_dp->is_mst == false))
+        if (status != connector_status_connected && !intel_dp->is_mst)
                 intel_dp_unset_edid(intel_dp);
 
         intel_display_power_put(to_i915(dev), power_domain);
-        return;
+        return status;
 }
 
 static enum drm_connector_status
@@ -4457,7 +4456,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
         struct intel_dp *intel_dp = intel_attached_dp(connector);
         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
         struct intel_encoder *intel_encoder = &intel_dig_port->base;
-        struct intel_connector *intel_connector = to_intel_connector(connector);
+        enum drm_connector_status status = connector->status;
 
         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                       connector->base.id, connector->name);
@@ -4472,14 +4471,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
         /* If full detect is not performed yet, do a full detect */
         if (!intel_dp->detect_done)
-                intel_dp_long_pulse(intel_dp->attached_connector);
+                status = intel_dp_long_pulse(intel_dp->attached_connector);
 
         intel_dp->detect_done = false;
 
-        if (is_edp(intel_dp) || intel_connector->detect_edid)
-                return connector_status_connected;
-        else
-                return connector_status_disconnected;
+        return status;
 }
 
 static void
@@ -4831,36 +4827,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                       port_name(intel_dig_port->port),
                       long_hpd ? "long" : "short");
 
+        if (long_hpd) {
+                intel_dp->detect_done = false;
+                return IRQ_NONE;
+        }
+
         power_domain = intel_display_port_aux_power_domain(intel_encoder);
         intel_display_power_get(dev_priv, power_domain);
 
-        if (long_hpd) {
-                intel_dp_long_pulse(intel_dp->attached_connector);
-                if (intel_dp->is_mst)
-                        ret = IRQ_HANDLED;
-                goto put_power;
-
-        } else {
-                if (intel_dp->is_mst) {
-                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
-                                /*
-                                 * If we were in MST mode, and device is not
-                                 * there, get out of MST mode
-                                 */
-                                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-                                              intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-                                intel_dp->is_mst = false;
-                                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
-                                                                intel_dp->is_mst);
-                                goto put_power;
-                        }
+        if (intel_dp->is_mst) {
+                if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+                        /*
+                         * If we were in MST mode, and device is not
+                         * there, get out of MST mode
+                         */
+                        DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+                                      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+                        intel_dp->is_mst = false;
+                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                        intel_dp->is_mst);
+                        intel_dp->detect_done = false;
+                        goto put_power;
                 }
+        }
 
         if (!intel_dp->is_mst) {
                 if (!intel_dp_short_pulse(intel_dp)) {
-                        intel_dp_long_pulse(intel_dp->attached_connector);
+                        intel_dp->detect_done = false;
                         goto put_power;
-                }
                 }
         }
 
@@ -1694,21 +1694,32 @@ bool bxt_ddi_dp_set_dpll_hw_state(int clock,
         return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
 }
 
+static bool
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
+                               struct intel_crtc_state *crtc_state, int clock,
+                               struct intel_dpll_hw_state *dpll_hw_state)
+{
+        struct bxt_clk_div clk_div = { };
+
+        bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+
+        return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
 static struct intel_shared_dpll *
 bxt_get_dpll(struct intel_crtc *crtc,
              struct intel_crtc_state *crtc_state,
              struct intel_encoder *encoder)
 {
-        struct bxt_clk_div clk_div = {0};
-        struct intel_dpll_hw_state dpll_hw_state = {0};
+        struct intel_dpll_hw_state dpll_hw_state = { };
         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
         struct intel_digital_port *intel_dig_port;
         struct intel_shared_dpll *pll;
         int i, clock = crtc_state->port_clock;
 
-        if (encoder->type == INTEL_OUTPUT_HDMI
-            && !bxt_ddi_hdmi_pll_dividers(crtc, crtc_state,
-                                          clock, &clk_div))
+        if (encoder->type == INTEL_OUTPUT_HDMI &&
+            !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
+                                            &dpll_hw_state))
                 return NULL;
 
         if ((encoder->type == INTEL_OUTPUT_DP ||
@@ -263,6 +263,7 @@ struct intel_panel {
                 bool enabled;
                 bool combination_mode; /* gen 2/4 only */
                 bool active_low_pwm;
+                bool alternate_pwm_increment; /* lpt+ */
 
                 /* PWM chip */
                 bool util_pin_active_low; /* bxt+ */
@@ -1741,9 +1742,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                           struct skl_ddb_allocation *ddb /* out */);
-bool skl_can_enable_sagv(struct drm_atomic_state *state);
-int skl_enable_sagv(struct drm_i915_private *dev_priv);
-int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
                                const struct skl_ddb_allocation *new,
                                enum pipe pipe);
@@ -210,9 +210,6 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
         memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-        if (intel_engine_has_waiter(engine))
-                i915_queue_hangcheck(engine->i915);
 }
 
 static void intel_engine_init_requests(struct intel_engine_cs *engine)
@@ -307,18 +304,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         return 0;
 }
 
-void intel_engine_reset_irq(struct intel_engine_cs *engine)
-{
-        struct drm_i915_private *dev_priv = engine->i915;
-
-        spin_lock_irq(&dev_priv->irq_lock);
-        if (intel_engine_has_waiter(engine))
-                engine->irq_enable(engine);
-        else
-                engine->irq_disable(engine);
-        spin_unlock_irq(&dev_priv->irq_lock);
-}
-
 /**
  * intel_engines_cleanup_common - cleans up the engine state created by
  * the common initiailizers.
@@ -160,6 +160,7 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
@@ -226,10 +226,16 @@ enum {
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 
+#define WA_TAIL_DWORDS 2
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                             struct intel_engine_cs *engine);
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                 struct intel_engine_cs *engine);
+static void execlists_init_reg_state(u32 *reg_state,
+                                     struct i915_gem_context *ctx,
+                                     struct intel_engine_cs *engine,
+                                     struct intel_ring *ring);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -621,6 +627,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 
         request->ring = ce->ring;
 
+        ret = intel_lr_context_pin(request->ctx, engine);
+        if (ret)
+                return ret;
+
         if (i915.enable_guc_submission) {
                 /*
                  * Check that the GuC has space for the request before
@@ -629,21 +639,17 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                  */
                 ret = i915_guc_wq_reserve(request);
                 if (ret)
-                        return ret;
+                        goto err_unpin;
         }
 
-        ret = intel_lr_context_pin(request->ctx, engine);
-        if (ret)
-                return ret;
-
         ret = intel_ring_begin(request, 0);
         if (ret)
-                goto err_unpin;
+                goto err_unreserve;
 
         if (!ce->initialised) {
                 ret = engine->init_context(request);
                 if (ret)
-                        goto err_unpin;
+                        goto err_unreserve;
 
                 ce->initialised = true;
         }
@@ -658,6 +664,9 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
         return 0;
 
+err_unreserve:
+        if (i915.enable_guc_submission)
+                i915_guc_wq_unreserve(request);
 err_unpin:
         intel_lr_context_unpin(request->ctx, engine);
         return ret;
@@ -708,7 +717,6 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 {
         struct intel_context *ce = &ctx->engine[engine->id];
         void *vaddr;
-        u32 *lrc_reg_state;
         int ret;
 
         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -727,17 +735,16 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
                 goto unpin_vma;
         }
 
-        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
         ret = intel_ring_pin(ce->ring);
         if (ret)
                 goto unpin_map;
 
         intel_lr_context_descriptor_update(ctx, engine);
 
-        lrc_reg_state[CTX_RING_BUFFER_START+1] =
+        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+        ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
                 i915_ggtt_offset(ce->ring->vma);
-        ce->lrc_reg_state = lrc_reg_state;
         ce->state->obj->dirty = true;
 
         /* Invalidate GuC TLB. */
@@ -1231,7 +1238,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
         lrc_init_hws(engine);
 
-        intel_engine_reset_irq(engine);
+        intel_engine_reset_breadcrumbs(engine);
 
         I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
@@ -1289,8 +1296,21 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         struct execlist_port *port = engine->execlist_port;
         struct intel_context *ce = &request->ctx->engine[engine->id];
 
+        /* We want a simple context + ring to execute the breadcrumb update.
+         * We cannot rely on the context being intact across the GPU hang,
+         * so clear it and rebuild just what we need for the breadcrumb.
+         * All pending requests for this context will be zapped, and any
+         * future request will be after userspace has had the opportunity
+         * to recreate its own state.
+         */
+        execlists_init_reg_state(ce->lrc_reg_state,
+                                 request->ctx, engine, ce->ring);
+
         /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+        ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+                i915_ggtt_offset(ce->ring->vma);
         ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
 
         request->ring->head = request->postfix;
         request->ring->last_retired_head = -1;
         intel_ring_update_space(request->ring);
@@ -1310,6 +1330,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         GEM_BUG_ON(request->ctx != port[0].request->ctx);
         port[0].count = 0;
         port[1].count = 0;
+
+        /* Reset WaIdleLiteRestore:bdw,skl as well */
+        request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1547,7 +1570,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-#define WA_TAIL_DWORDS 2
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
@@ -1894,38 +1916,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
         return indirect_ctx_offset;
 }
 
-static int
-populate_lr_context(struct i915_gem_context *ctx,
-                    struct drm_i915_gem_object *ctx_obj,
-                    struct intel_engine_cs *engine,
-                    struct intel_ring *ring)
+static void execlists_init_reg_state(u32 *reg_state,
+                                     struct i915_gem_context *ctx,
+                                     struct intel_engine_cs *engine,
+                                     struct intel_ring *ring)
 {
-        struct drm_i915_private *dev_priv = ctx->i915;
-        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-        void *vaddr;
-        u32 *reg_state;
-        int ret;
-
-        if (!ppgtt)
-                ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-        if (ret) {
-                DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-                return ret;
-        }
-
-        vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
-        if (IS_ERR(vaddr)) {
-                ret = PTR_ERR(vaddr);
-                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
-                return ret;
-        }
-        ctx_obj->dirty = true;
-
-        /* The second page of the context object contains some fields which must
-         * be set up prior to the first execution. */
-        reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+        struct drm_i915_private *dev_priv = engine->i915;
+        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
 
         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
          * commands followed by (reg, value) pairs. The values we are setting here are
@@ -1939,14 +1936,11 @@ populate_lr_context(struct i915_gem_context *ctx,
                        _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                           (HAS_RESOURCE_STREAMER(dev_priv) ?
                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
         ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
                        0);
         ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
                        0);
-        /* Ring buffer start address is not known until the buffer is pinned.
-         * It is written to the context image in execlists_update_context()
-         */
         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
                        RING_START(engine->mmio_base), 0);
         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
@@ -2029,6 +2023,36 @@ populate_lr_context(struct i915_gem_context *ctx,
                 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
                                make_rpcs(dev_priv));
         }
+}
+
+static int
+populate_lr_context(struct i915_gem_context *ctx,
+                    struct drm_i915_gem_object *ctx_obj,
+                    struct intel_engine_cs *engine,
+                    struct intel_ring *ring)
+{
+        void *vaddr;
+        int ret;
+
+        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+        if (ret) {
+                DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+                return ret;
+        }
+
+        vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+        if (IS_ERR(vaddr)) {
+                ret = PTR_ERR(vaddr);
+                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+                return ret;
+        }
+        ctx_obj->dirty = true;
+
+        /* The second page of the context object contains some fields which must
+         * be set up prior to the first execution. */
+
+        execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+                                 ctx, engine, ring);
 
         i915_gem_object_unpin_map(ctx_obj);
 
@@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
 {
         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
         struct intel_panel *panel = &connector->panel;
-        u32 pch_ctl1, pch_ctl2;
+        u32 pch_ctl1, pch_ctl2, schicken;
 
         pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
         if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
@@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector)
                 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
         }
 
+        if (HAS_PCH_LPT(dev_priv)) {
+                schicken = I915_READ(SOUTH_CHICKEN2);
+                if (panel->backlight.alternate_pwm_increment)
+                        schicken |= LPT_PWM_GRANULARITY;
+                else
+                        schicken &= ~LPT_PWM_GRANULARITY;
+                I915_WRITE(SOUTH_CHICKEN2, schicken);
+        } else {
+                schicken = I915_READ(SOUTH_CHICKEN1);
+                if (panel->backlight.alternate_pwm_increment)
+                        schicken |= SPT_PWM_GRANULARITY;
+                else
+                        schicken &= ~SPT_PWM_GRANULARITY;
+                I915_WRITE(SOUTH_CHICKEN1, schicken);
+        }
+
         pch_ctl2 = panel->backlight.max << 16;
         I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
@@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
  */
 static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+        struct intel_panel *panel = &connector->panel;
         u32 mul;
 
-        if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+        if (panel->backlight.alternate_pwm_increment)
                 mul = 128;
         else
                 mul = 16;
@@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+        struct intel_panel *panel = &connector->panel;
         u32 mul, clock;
 
-        if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+        if (panel->backlight.alternate_pwm_increment)
                 mul = 16;
         else
                 mul = 128;
@@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
         struct intel_panel *panel = &connector->panel;
         u32 pch_ctl1, pch_ctl2, val;
+        bool alt;
+
+        if (HAS_PCH_LPT(dev_priv))
+                alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+        else
+                alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+        panel->backlight.alternate_pwm_increment = alt;
 
         pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
         panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
@@ -2126,33 +2126,35 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
                 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
                                 GEN9_MEM_LATENCY_LEVEL_MASK;
 
+                /*
+                 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+                 * need to be disabled. We make sure to sanitize the values out
+                 * of the punit to satisfy this requirement.
+                 */
+                for (level = 1; level <= max_level; level++) {
+                        if (wm[level] == 0) {
+                                for (i = level + 1; i <= max_level; i++)
+                                        wm[i] = 0;
+                                break;
+                        }
+                }
+
                 /*
                  * WaWmMemoryReadLatency:skl
                  *
                  * punit doesn't take into account the read latency so we need
-                 * to add 2us to the various latency levels we retrieve from
-                 * the punit.
-                 * - W0 is a bit special in that it's the only level that
-                 *   can't be disabled if we want to have display working, so
-                 *   we always add 2us there.
-                 * - For levels >=1, punit returns 0us latency when they are
-                 *   disabled, so we respect that and don't add 2us then
-                 *
-                 * Additionally, if a level n (n > 1) has a 0us latency, all
-                 * levels m (m >= n) need to be disabled. We make sure to
-                 * sanitize the values out of the punit to satisfy this
-                 * requirement.
+                 * to add 2us to the various latency levels we retrieve from the
+                 * punit when level 0 response data us 0us.
                  */
-                wm[0] += 2;
-                for (level = 1; level <= max_level; level++)
-                        if (wm[level] != 0)
+                if (wm[0] == 0) {
+                        wm[0] += 2;
+                        for (level = 1; level <= max_level; level++) {
+                                if (wm[level] == 0)
+                                        break;
                                 wm[level] += 2;
-                        else {
-                                for (i = level + 1; i <= max_level; i++)
-                                        wm[i] = 0;
-
-                                break;
                         }
+                }
 
         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                 uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
@@ -2877,6 +2879,19 @@ skl_wm_plane_id(const struct intel_plane *plane)
         }
 }
 
+static bool
+intel_has_sagv(struct drm_i915_private *dev_priv)
+{
+        if (IS_KABYLAKE(dev_priv))
+                return true;
+
+        if (IS_SKYLAKE(dev_priv) &&
+            dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
+                return true;
+
+        return false;
+}
+
 /*
  * SAGV dynamically adjusts the system agent voltage and clock frequencies
  * depending on power and performance requirements. The display engine access
@@ -2889,12 +2904,14 @@ skl_wm_plane_id(const struct intel_plane *plane)
  * - We're not using an interlaced display configuration
  */
 int
-skl_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *dev_priv)
 {
         int ret;
 
-        if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-            dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+        if (!intel_has_sagv(dev_priv))
+                return 0;
+
+        if (dev_priv->sagv_status == I915_SAGV_ENABLED)
                 return 0;
 
         DRM_DEBUG_KMS("Enabling the SAGV\n");
@@ -2910,21 +2927,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv)
          * Some skl systems, pre-release machines in particular,
          * don't actually have an SAGV.
          */
-        if (ret == -ENXIO) {
+        if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-                dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                 return 0;
         } else if (ret < 0) {
                 DRM_ERROR("Failed to enable the SAGV\n");
                 return ret;
         }
 
-        dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+        dev_priv->sagv_status = I915_SAGV_ENABLED;
         return 0;
 }
 
 static int
-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+intel_do_sagv_disable(struct drm_i915_private *dev_priv)
 {
         int ret;
         uint32_t temp = GEN9_SAGV_DISABLE;
@@ -2938,19 +2955,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv)
 }
 
 int
-skl_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
         int ret, result;
 
-        if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-            dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+        if (!intel_has_sagv(dev_priv))
+                return 0;
+
+        if (dev_priv->sagv_status == I915_SAGV_DISABLED)
                 return 0;
 
         DRM_DEBUG_KMS("Disabling the SAGV\n");
         mutex_lock(&dev_priv->rps.hw_lock);
 
         /* bspec says to keep retrying for at least 1 ms */
-        ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+        ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
         mutex_unlock(&dev_priv->rps.hw_lock);
 
         if (ret == -ETIMEDOUT) {
@@ -2962,20 +2981,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv)
          * Some skl systems, pre-release machines in particular,
          * don't actually have an SAGV.
          */
-        if (result == -ENXIO) {
+        if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
                 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-                dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                 return 0;
         } else if (result < 0) {
                 DRM_ERROR("Failed to disable the SAGV\n");
                 return result;
         }
 
-        dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+        dev_priv->sagv_status = I915_SAGV_DISABLED;
         return 0;
 }
 
-bool skl_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct drm_atomic_state *state)
 {
         struct drm_device *dev = state->dev;
         struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2984,6 +3003,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state)
         enum pipe pipe;
         int level, plane;
 
+        if (!intel_has_sagv(dev_priv))
+                return false;
+
         /*
          * SKL workaround: bspec recommends we disable the SAGV when we have
          * more then one pipe enabled
@@ -3472,29 +3494,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 }
 
 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t cpp,
-			       uint64_t tiling, uint32_t latency)
+			       uint32_t latency, uint32_t plane_blocks_per_line)
 {
 	uint32_t ret;
-	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t wm_intermediate_val;
 
 	if (latency == 0)
 		return UINT_MAX;
 
-	plane_bytes_per_line = horiz_pixels * cpp;
-
-	if (tiling == I915_FORMAT_MOD_Y_TILED ||
-	    tiling == I915_FORMAT_MOD_Yf_TILED) {
-		plane_bytes_per_line *= 4;
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-		plane_blocks_per_line /= 4;
-	} else if (tiling == DRM_FORMAT_MOD_NONE) {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
-	} else {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-	}
-
 	wm_intermediate_val = latency * pixel_rate;
 	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
 	      plane_blocks_per_line;
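With the tiling-dependent block math hoisted out to the caller, method2 is now just the latency expressed in scanlines multiplied by blocks per line. A worked example with illustrative numbers (hypothetical inputs, not taken from the patch):

    /*
     * Say pixel_rate = 148500 (kHz), pipe_htotal = 2200,
     * latency = 15 (us), plane_blocks_per_line = 30:
     *
     *   wm_intermediate_val = 15 * 148500 = 2227500
     *   DIV_ROUND_UP(2227500, 2200 * 1000) = 2      (scanlines of latency)
     *   method2 = 2 * 30 = 60                       (blocks)
     */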
@@ -3545,6 +3552,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	uint8_t cpp;
 	uint32_t width = 0, height = 0;
 	uint32_t plane_pixel_rate;
+	uint32_t y_tile_minimum, y_min_scanlines;
 
 	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
 		*enabled = false;
@@ -3560,38 +3568,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
+	if (intel_rotation_90_or_270(pstate->rotation)) {
+		int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+			drm_format_plane_cpp(fb->pixel_format, 1) :
+			drm_format_plane_cpp(fb->pixel_format, 0);
+
+		switch (cpp) {
+		case 1:
+			y_min_scanlines = 16;
+			break;
+		case 2:
+			y_min_scanlines = 8;
+			break;
+		default:
+			WARN(1, "Unsupported pixel depth for rotation");
+		case 4:
+			y_min_scanlines = 4;
+			break;
+		}
+	} else {
+		y_min_scanlines = 4;
+	}
+
+	plane_bytes_per_line = width * cpp;
+	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+		plane_blocks_per_line =
+		      DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
+		plane_blocks_per_line /= y_min_scanlines;
+	} else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+					+ 1;
+	} else {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+	}
+
 	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
 	method2 = skl_wm_method2(plane_pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 width,
-				 cpp,
-				 fb->modifier[0],
-				 latency);
+				 latency,
+				 plane_blocks_per_line);
 
-	plane_bytes_per_line = width * cpp;
-	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+	y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
-		uint32_t min_scanlines = 4;
-		uint32_t y_tile_minimum;
-		if (intel_rotation_90_or_270(pstate->rotation)) {
-			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
-				drm_format_plane_cpp(fb->pixel_format, 1) :
-				drm_format_plane_cpp(fb->pixel_format, 0);
-
-			switch (cpp) {
-			case 1:
-				min_scanlines = 16;
-				break;
-			case 2:
-				min_scanlines = 8;
-				break;
-			case 8:
-				WARN(1, "Unsupported pixel depth for rotation");
-			}
-		}
-		y_tile_minimum = plane_blocks_per_line * min_scanlines;
 		selected_result = max(method2, y_tile_minimum);
 	} else {
 		if ((ddb_allocation / plane_blocks_per_line) >= 1)
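To see what the relocated y_min_scanlines handling buys, a worked example with hypothetical plane parameters; the point is that plane_blocks_per_line and hence method2 now account for the real tile-fetch granularity, where the old skl_wm_method2() hard-coded a factor of 4:

    /*
     * A 3840-wide, cpp = 4, 90-degree-rotated, Y-tiled plane keeps
     * y_min_scanlines = 4:
     *
     *   plane_bytes_per_line  = 3840 * 4 = 15360
     *   plane_blocks_per_line = DIV_ROUND_UP(15360 * 4, 512) / 4
     *                         = 120 / 4 = 30
     *   y_tile_minimum        = 30 * 4 = 120        (blocks)
     *
     * The same plane at cpp = 1 uses y_min_scanlines = 16, a case the
     * old "*= 4 ... /= 4" block math could not represent.
     */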
@@ -3605,10 +3626,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 
 	if (level >= 1 && level <= 7) {
 		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
-		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
-			res_lines += 4;
-		else
+		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+			res_blocks += y_tile_minimum;
+			res_lines += y_min_scanlines;
+		} else {
 			res_blocks++;
+		}
 	}
 
 	if (res_blocks >= ddb_allocation || res_lines > 31) {
@@ -3939,6 +3962,41 @@ pipes_modified(struct drm_atomic_state *state)
 	return ret;
 }
 
+int
+skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+{
+	struct drm_atomic_state *state = cstate->base.state;
+	struct drm_device *dev = state->dev;
+	struct drm_crtc *crtc = cstate->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	enum pipe pipe = intel_crtc->pipe;
+	int id;
+
+	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+		id = skl_wm_plane_id(to_intel_plane(plane));
+
+		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
+					&new_ddb->plane[pipe][id]) &&
+		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
+					&new_ddb->y_plane[pipe][id]))
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
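The skl_ddb_entry_equal() test above is what lets unchanged planes skip the atomic-state pull-in: only planes whose ddb or y_plane allocation actually moved get added. Its definition (in i915_drv.h) is roughly a start/end comparison; a sketch for reference:

    static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
                                           const struct skl_ddb_entry *e2)
    {
        /* Two allocations match iff they cover the same block range. */
        return e1->start == e2->start && e1->end == e2->end;
    }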
@@ -4003,7 +4061,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		if (ret)
 			return ret;
 
-		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		ret = skl_ddb_add_affected_planes(cstate);
 		if (ret)
 			return ret;
 	}
@@ -564,7 +564,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	else
 		intel_ring_setup_status_page(engine);
 
-	intel_engine_reset_irq(engine);
+	intel_engine_reset_breadcrumbs(engine);
 
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
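intel_engine_reset_breadcrumbs() replaces the bare IRQ reset so that, per the "Reset the breadcrumbs IRQ more carefully" commit, the breadcrumb bookkeeping and the interrupt state are restored together after a GPU reset. A rough sketch of its shape, with locking and helper names approximated from the surrounding breadcrumbs code (treat as an assumption, not the verbatim implementation):

    void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
    {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        spin_lock_irq(&b->lock);
        /* Forget any missed-interrupt state from before the reset... */
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        /* ...and re-apply whatever IRQ state the current waiters expect. */
        if (b->irq_enabled)
            irq_enable(engine);
        else
            irq_disable(engine);
        spin_unlock_irq(&b->lock);
    }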
@@ -328,6 +328,7 @@ struct intel_engine_cs {
 	 * inspecting request list.
 	 */
 	u32 last_submitted_seqno;
+	u32 last_pending_seqno;
 
 	/* An RCU guarded pointer to the last request. No reference is
 	 * held to the request, users must carefully acquire a reference to
@@ -492,7 +493,6 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-void intel_engine_reset_irq(struct intel_engine_cs *engine);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
@@ -584,6 +584,7 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 	return wakeup;
 }
 
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned int intel_kick_waiters(struct drm_i915_private *i915);
 unsigned int intel_kick_signalers(struct drm_i915_private *i915);
@@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 		      const bool read,
 		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(dev_priv),
-		 "Unclaimed register detected %s %s register 0x%x\n",
-		 before ? "before" : "after",
-		 read ? "reading" : "writing to",
+	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+		 "Unclaimed %s register 0x%x\n",
+		 read ? "read from" : "write to",
 		 i915_mmio_reg_offset(reg)))
 		i915.mmio_debug--; /* Only report the first N failures */
 }
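The added "&& !before" means the check run before a register access now only clears any stale unclaimed-access bit (check_for_unclaimed_mmio() clears it as a side effect) without warning; only the check after the access reports, which is why the message can name the access direction instead of before/after. For context, this slow path is reached through an inline wrapper in the same file, roughly:

    static inline void
    unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                        const i915_reg_t reg,
                        const bool read,
                        const bool before)
    {
        /* Debugging disabled in the common case: skip the MMIO check. */
        if (likely(!i915.mmio_debug))
            return;

        __unclaimed_reg_debug(dev_priv, reg, read, before);
    }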