Merge tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of fixes for 3.18. Major parts:
- ppgtt fixes (but full ppgtt is for 3.19) from Chris, Michel, ...
- hdmi pixel replication fixes (Clint Taylor)
- leftover i830M patches from Ville
- small things all over

* tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel: (21 commits)
  drm/i915: Enable pixel replicated modes on BDW and HSW.
  drm/i915: Don't spam dmesg with rps messages on vlv/chv
  drm/i915: Do not leak pages when freeing userptr objects
  drm/i915: Do not store the error pointer for a failed userptr registration
  Revert "drm/i915/bdw: BDW Software Turbo"
  drm/i915/bdw: Cleanup pre prod workarounds
  drm/i915: Use EIO instead of EAGAIN for sink CRC error.
  drm/i915: Extend BIOS stolen mem handling to all platform
  drm/i915: Match GTT space sanity checker with implementation
  drm/i915: HSW always use GGTT selector for secure batches
  drm/i915: add cherryview specfic forcewake in execlists_elsp_write
  drm/i915: fix another use-after-free in i915_gem_evict_everything
  drm/i915: Don't reinit hpd interrupts after gpu reset
  drm/i915: Wrap -EIO send-vblank event for failed pageflip in spinlock
  drm/i915: Drop any active reference before unbinding
  drm/i915: Objects on the unbound list may still have an active reference
  drm/i915/edp: use lane count and link rate from DPCD for eDP
  drm/i915/dp: add missing \n in the TPS3 debug message
  drm/i915/hdmi, dp: Do not dereference the encoder in the connector destroy
  drm/i915: Limit the watermark to at least 8 entries on gen2/3
  ...
commit 436e94a4cb
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
 		i915_gem_retire_requests(dev);
 
-	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-					 global_list) {
-			struct i915_vma *vma, *v;
+	if (val & DROP_BOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
-			ret = 0;
-			drm_gem_object_reference(&obj->base);
-			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
-				if (vma->pin_count)
-					continue;
-
-				ret = i915_vma_unbind(vma);
-				if (ret)
-					break;
-			}
-			drm_gem_object_unreference(&obj->base);
-			if (ret)
-				goto unlock;
-		}
-	}
-
-	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-					 global_list)
-			if (obj->pages_pin_count == 0) {
-				ret = i915_gem_object_put_pages(obj);
-				if (ret)
-					goto unlock;
-			}
-	}
+	if (val & DROP_UNBOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
 unlock:
 	mutex_unlock(&dev->struct_mutex);
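The debugfs path above becomes a pure bitmask dispatch: each DROP_* bit maps to one shrink class, and LONG_MAX as the target means "free everything you can". A minimal user-space sketch of that pattern follows; the flag values and the fake_shrink() helper are invented for illustration, not the driver's real constants.

	/* Sketch of flag-dispatched cache dropping. Build: cc -std=c99 drop.c */
	#include <stdio.h>
	#include <limits.h>

	#define DROP_BOUND   0x2	/* illustrative values */
	#define DROP_UNBOUND 0x4

	#define SHRINK_UNBOUND 0x2
	#define SHRINK_BOUND   0x4

	/* Stand-in for i915_gem_shrink(): would return pages freed. */
	static unsigned long fake_shrink(long target, unsigned flags)
	{
		printf("shrink(target=%ld, flags=%#x)\n", target, flags);
		return 0;
	}

	static void drop_caches(unsigned long long val)
	{
		if (val & DROP_BOUND)
			fake_shrink(LONG_MAX, SHRINK_BOUND);
		if (val & DROP_UNBOUND)
			fake_shrink(LONG_MAX, SHRINK_UNBOUND);
	}

	int main(void)
	{
		drop_caches(DROP_BOUND | DROP_UNBOUND);
		return 0;
	}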
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
@@ -871,8 +871,6 @@ int i915_reset(struct drm_device *dev)
 		 */
 		if (INTEL_INFO(dev)->gen > 5)
 			intel_reset_gt_powersave(dev);
-
-		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
@@ -946,23 +946,6 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-	u32 it_threshold_pct; /* interrupt, in percentage */
-	u32 eval_interval; /* evaluation interval, in us */
-	u32 last_ts;
-	u32 last_c0;
-	bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-	struct intel_rps_bdw_cal up;
-	struct intel_rps_bdw_cal down;
-	struct timer_list flip_timer;
-	u32 timeout;
-	atomic_t flip_received;
-	struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -996,9 +979,6 @@ struct intel_gen6_power_mgmt {
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
-	bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
-	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2369,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2823,8 +2809,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
 				 unsigned long event,
 				 void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	 * offsets on purgeable objects by truncating it and marking it purged,
 	 * which prevents userspace from ever using that object again.
 	 */
-	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	i915_gem_shrink(dev_priv,
+			obj->base.size >> PAGE_SHIFT,
+			I915_SHRINK_BOUND |
+			I915_SHRINK_UNBOUND |
+			I915_SHRINK_PURGEABLE);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
 		goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-		  bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+		long target, unsigned flags)
 {
-	struct list_head still_in_list;
-	struct drm_i915_gem_object *obj;
+	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
 	unsigned long count = 0;
 
 	/*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-		obj = list_first_entry(&dev_priv->mm.unbound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+	if (flags & I915_SHRINK_UNBOUND) {
+		struct list_head still_in_list;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+			struct drm_i915_gem_object *obj;
 
-		drm_gem_object_reference(&obj->base);
+			obj = list_first_entry(&dev_priv->mm.unbound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		drm_gem_object_unreference(&obj->base);
+			drm_gem_object_reference(&obj->base);
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-		struct i915_vma *vma, *v;
+	if (flags & I915_SHRINK_BOUND) {
+		struct list_head still_in_list;
 
-		obj = list_first_entry(&dev_priv->mm.bound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma, *v;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+			obj = list_first_entry(&dev_priv->mm.bound_list,
					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		drm_gem_object_reference(&obj->base);
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-			if (i915_vma_unbind(vma))
-				break;
+			drm_gem_object_reference(&obj->base);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+				if (i915_vma_unbind(vma))
+					break;
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
 
-		drm_gem_object_unreference(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.bound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
 
 static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-	return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	i915_gem_evict_everything(dev_priv->dev);
-	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+	return i915_gem_shrink(dev_priv, LONG_MAX,
+			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
-			i915_gem_purge(dev_priv, page_count);
+			i915_gem_shrink(dev_priv,
+					page_count,
+					I915_SHRINK_BOUND |
+					I915_SHRINK_UNBOUND |
+					I915_SHRINK_PURGEABLE);
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		}
 		if (IS_ERR(page)) {
@@ -2944,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	 * cause memory corruption through use-after-free.
 	 */
 
+	/* Throw away the active reference before moving to the unbound list */
+	i915_gem_object_retire(obj);
+
 	if (i915_is_ggtt(vma->vm)) {
 		i915_gem_object_finish_gtt(obj);
 
@@ -3336,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-				     struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
+	struct drm_mm_node *gtt_space = &vma->node;
 	struct drm_mm_node *other;
 
-	/* On non-LLC machines we have to be careful when putting differing
-	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dying.
+	/*
+	 * On some machines we have to be careful when putting differing types
+	 * of snoopable memory together to avoid the prefetcher crossing memory
+	 * domains and dying. During vm initialisation, we decide whether or not
+	 * these constraints apply and set the drm_mm.color_adjust
+	 * appropriately.
 	 */
-	if (HAS_LLC(dev))
+	if (vma->vm->mm.color_adjust == NULL)
 		return true;
 
 	if (!drm_mm_node_allocated(gtt_space))
@@ -3484,8 +3502,7 @@ search_free:
 
 		goto err_free_vma;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
 		goto err_remove_node;
 	}
@@ -3695,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
@@ -5261,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!i915_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	freed = i915_gem_shrink(dev_priv,
+				sc->nr_to_scan,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_PURGEABLE);
 	if (freed < sc->nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv,
-					   sc->nr_to_scan - freed,
-					   false);
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
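The restructured shrinker above keeps popping objects off the head of a list, parks every visited object on a private still_in_list so a skipped object can never be revisited, and splices the survivors back when done. A self-contained model of that termination trick, using a plain singly-linked list instead of the kernel's intrusive list_head:

	/* Model of the still_in_list loop. Build: cc -std=c99 shrink.c */
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int pages;
		int purgeable;
		struct obj *next;
	};

	static struct obj *pop(struct obj **list)
	{
		struct obj *o = *list;
		if (o)
			*list = o->next;
		return o;
	}

	static void push(struct obj **list, struct obj *o)
	{
		o->next = *list;
		*list = o;
	}

	static long shrink(struct obj **list, long target, int purgeable_only)
	{
		struct obj *still_in_list = NULL;
		long count = 0;

		while (count < target && *list) {
			struct obj *o = pop(list);

			if (purgeable_only && !o->purgeable) {
				/* Skipped objects are parked, so the loop
				 * always makes forward progress. */
				push(&still_in_list, o);
				continue;
			}
			count += o->pages;	/* "free" its backing pages */
			free(o);
		}

		/* Splice survivors back for the next caller. */
		while (still_in_list)
			push(list, pop(&still_in_list));

		return count;
	}

	int main(void)
	{
		struct obj *list = NULL;
		for (int i = 0; i < 4; i++) {
			struct obj *o = malloc(sizeof(*o));
			o->pages = 10;
			o->purgeable = i & 1;
			push(&list, o);
		}
		printf("freed %ld pages\n", shrink(&list, 100, 1));
		return 0;
	}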
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -243,7 +243,7 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_address_space *vm, *v;
 	bool lists_empty = true;
 	int ret;
 
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
 		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
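The use-after-free fix above boils down to: if the loop body can destroy the current entry, the iterator must cache the successor first. A minimal stand-alone illustration of that bug class (the list type here is invented for the demo):

	/* Why the _safe iteration variant matters. Build: cc -std=c99 safe.c */
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int id;
		struct node *next;
	};

	int main(void)
	{
		struct node *head = NULL;

		for (int i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));
			n->id = i;
			n->next = head;
			head = n;
		}

		/* Mirrors list_for_each_entry_safe(vm, v, ...): remember the
		 * successor before the body runs, because the body may free
		 * the current entry. Reading cur->next after free(cur) would
		 * be a use-after-free. */
		struct node *cur, *next;
		for (cur = head; cur; cur = next) {
			next = cur->next;	/* cached before cur is freed */
			printf("visiting %d\n", cur->id);
			free(cur);
		}
		return 0;
	}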
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
 	int bios_reserved = 0;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
 		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
-	if (IS_VALLEYVIEW(dev))
-		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+	if (INTEL_INFO(dev)->gen >= 8) {
+		tmp = I915_READ(GEN7_BIOS_RESERVED);
+		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
+		tmp &= GEN8_BIOS_RESERVED_MASK;
+		bios_reserved = (1024*1024) << tmp;
+	} else if (IS_GEN7(dev)) {
+		tmp = I915_READ(GEN7_BIOS_RESERVED);
+		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
+			256*1024 : 1024*1024;
+	}
 
 	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
 		return 0;
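The decode logic added above is small enough to test in isolation: gen8+ keeps a 2-bit shift amount in the register (1M << field), gen7 a single 256K-vs-1M flag. A stand-alone version, with register values made up for the demo:

	/* BIOS-reserved-size decode, modelled after the hunk above.
	 * Build: cc -std=c99 stolen.c */
	#include <stdio.h>
	#include <stdint.h>

	#define GEN7_BIOS_RESERVED_256K  (1u << 5)
	#define GEN8_BIOS_RESERVED_SHIFT 7
	#define GEN8_BIOS_RESERVED_MASK  0x3

	static unsigned long bios_reserved(int gen, uint32_t reg)
	{
		if (gen >= 8) {
			uint32_t tmp = (reg >> GEN8_BIOS_RESERVED_SHIFT) &
				       GEN8_BIOS_RESERVED_MASK;
			return (1024UL * 1024) << tmp;	/* 1M..8M */
		}
		if (gen == 7)
			return reg & GEN7_BIOS_RESERVED_256K ?
				256UL * 1024 : 1024UL * 1024;
		return 0;	/* other platforms handled elsewhere */
	}

	int main(void)
	{
		printf("gen8, field=2 -> %lu bytes\n", bios_reserved(8, 2u << 7));
		printf("gen7, 256K    -> %lu bytes\n", bios_reserved(7, 1u << 5));
		return 0;
	}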
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	if (mm->mn == NULL) {
-		down_write(&mm->mm->mmap_sem);
-		mutex_lock(&to_i915(mm->dev)->mm_lock);
-		if (mm->mn == NULL)
-			mm->mn = i915_mmu_notifier_create(mm->mm);
-		mutex_unlock(&to_i915(mm->dev)->mm_lock);
-		up_write(&mm->mm->mmap_sem);
+	struct i915_mmu_notifier *mn = mm->mn;
+
+	mn = mm->mn;
+	if (mn)
+		return mn;
+
+	down_write(&mm->mm->mmap_sem);
+	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	if ((mn = mm->mn) == NULL) {
+		mn = i915_mmu_notifier_create(mm->mm);
+		if (!IS_ERR(mn))
+			mm->mn = mn;
 	}
-	return mm->mn;
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	up_write(&mm->mm->mmap_sem);
+
+	return mn;
 }
 
 static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	BUG_ON(obj->userptr.work != NULL);
 
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
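The first userptr fix above is a double-checked creation pattern with one extra rule: a failed attempt is never published, so later callers retry instead of dereferencing a cached error pointer. A user-space model with pthreads (the notifier type and create() stand-in are invented):

	/* Double-checked create, publish only on success.
	 * Build: cc -std=c99 dcl.c -lpthread */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct notifier { int dummy; };

	static struct notifier *cached;	/* plays the role of mm->mn */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for i915_mmu_notifier_create(); may fail (NULL). */
	static struct notifier *create(void)
	{
		return malloc(sizeof(struct notifier));
	}

	static struct notifier *find(void)
	{
		struct notifier *mn = cached;

		if (mn)			/* fast path: no lock taken */
			return mn;

		pthread_mutex_lock(&lock);
		mn = cached;		/* recheck: another thread may have won */
		if (mn == NULL) {
			mn = create();
			if (mn != NULL)	/* publish only a valid pointer */
				cached = mn;
		}
		pthread_mutex_unlock(&lock);

		return mn;		/* may be NULL; caller handles it */
	}

	int main(void)
	{
		printf("notifier at %p\n", (void *)find());
		return 0;
	}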
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
@@ -1979,27 +1979,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 			     res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
-	if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-	}
-	else {
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	}
-
-	bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
@@ -143,6 +143,14 @@
 #define GAB_CTL				0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
 
+#define GEN7_BIOS_RESERVED		0x1082C0
+#define GEN7_BIOS_RESERVED_1M		(0 << 5)
+#define GEN7_BIOS_RESERVED_256K		(1 << 5)
+#define GEN8_BIOS_RESERVED_SHIFT	7
+#define GEN7_BIOS_RESERVED_MASK		0x1
+#define GEN8_BIOS_RESERVED_MASK		0x3
+
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -2435,6 +2443,7 @@ enum punit_power_well {
 #define _PIPEASRC	0x6001c
 #define _BCLRPAT_A	0x60020
 #define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
 
 /* Pipe B timing regs */
 #define _HTOTAL_B	0x61000
@@ -2446,6 +2455,7 @@ enum punit_power_well {
 #define _PIPEBSRC	0x6101c
 #define _BCLRPAT_B	0x61020
 #define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
 
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
@@ -2466,6 +2476,7 @@ enum punit_power_well {
 #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
 #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
 #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev)			(IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5577,10 +5588,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6				0x9430
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
-#define TIMESTAMP_CTR				0x44070
-#define FREQ_1_28_US(us)			(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0				(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
@@ -1612,6 +1612,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static int intel_num_dvo_pipes(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+	int count = 0;
+
+	for_each_intel_crtc(dev, crtc)
+		count += crtc->active &&
+			intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+
+	return count;
+}
+
 static void i9xx_enable_pll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -1628,7 +1640,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 	if (IS_MOBILE(dev) && !IS_I830(dev))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
 
-	I915_WRITE(reg, dpll);
+	/* Enable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+		/*
+		 * It appears to be important that we don't enable this
+		 * for the current pipe before otherwise configuring the
+		 * PLL. No idea how this should be handled if multiple
+		 * DVO outputs are enabled simultaneosly.
+		 */
+		dpll |= DPLL_DVO_2X_MODE;
+		I915_WRITE(DPLL(!crtc->pipe),
+			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+	}
 
 	/* Wait for the clocks to stabilize. */
 	POSTING_READ(reg);
@@ -1667,8 +1690,22 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
  *
  * Note! This is for pre-ILK only.
  */
-static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	/* Disable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) &&
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+	    intel_num_dvo_pipes(dev) == 1) {
+		I915_WRITE(DPLL(PIPE_B),
+			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
+		I915_WRITE(DPLL(PIPE_A),
+			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
+	}
+
 	/* Don't disable pipe or pipe PLLs if needed */
 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
@@ -4185,6 +4222,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_pipe_timings(intel_crtc);
 
+	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+			   intel_crtc->config.pixel_multiplier - 1);
+	}
+
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
 				     &intel_crtc->config.fdi_m_n, NULL);
@@ -4941,7 +4983,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		else if (IS_VALLEYVIEW(dev))
 			vlv_disable_pll(dev_priv, pipe);
 		else
-			i9xx_disable_pll(dev_priv, pipe);
+			i9xx_disable_pll(intel_crtc);
 	}
 
 	if (!IS_GEN2(dev))
@@ -5945,7 +5987,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -6451,6 +6493,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	}
 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
 	if (!IS_VALLEYVIEW(dev)) {
+		/*
+		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+		 * on 830. Filter it out here so that we don't
+		 * report errors due to that.
+		 */
+		if (IS_I830(dev))
+			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
 	} else {
@@ -7845,7 +7895,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 		(I915_READ(IPS_CTL) & IPS_ENABLE);
 
-	pipe_config->pixel_multiplier = 1;
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
 
 	return true;
 }
@@ -9881,9 +9936,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int ret;
 
-	//trigger software GT busyness calculation
-	gen8_flip_interrupt(dev);
-
 	/*
 	 * drm_mode_page_flip_ioctl() should already catch this, but double
 	 * check to be safe. In the future we may enable pageflipping from
@@ -10039,8 +10091,11 @@ free_work:
 out_hang:
 		intel_crtc_wait_for_pending_flips(crtc);
 		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
-		if (ret == 0 && event)
+		if (ret == 0 && event) {
+			spin_lock_irqsave(&dev->event_lock, flags);
 			drm_send_vblank_event(dev, pipe, event);
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+		}
 	}
 	return ret;
 }
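The PIPE_MULT handling above uses the common hardware convention of storing the multiplier biased by one: haswell_crtc_enable() writes pixel_multiplier - 1 and haswell_get_pipe_config() reads it back with + 1. A trivial round-trip model of that encoding:

	/* N-1 register encoding round-trip. Build: cc -std=c99 mult.c */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pipe_mult_encode(unsigned int pixel_multiplier)
	{
		return pixel_multiplier - 1;	/* register field holds N-1 */
	}

	static unsigned int pipe_mult_decode(uint32_t reg)
	{
		return reg + 1;
	}

	int main(void)
	{
		/* Pixel replication doubles (or quadruples) the clock for
		 * low-resolution HDMI modes. */
		for (unsigned int mult = 1; mult <= 4; mult++)
			assert(pipe_mult_decode(pipe_mult_encode(mult)) == mult);
		printf("encode/decode round-trips for multipliers 1..4\n");
		return 0;
	}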
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
@@ -1068,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			bpp = dev_priv->vbt.edp_bpp;
 		}
 
-		if (IS_BROADWELL(dev)) {
-			/* Yes, it's an ugly hack. */
-			min_lane_count = max_lane_count;
-			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
-				      min_lane_count);
-		} else if (dev_priv->vbt.edp_lanes) {
-			min_lane_count = min(dev_priv->vbt.edp_lanes,
-					     max_lane_count);
-			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
-				      min_lane_count);
-		}
-
-		if (dev_priv->vbt.edp_rate) {
-			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
-			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
-				      bws[min_clock]);
-		}
+		/*
+		 * Use the maximum clock and number of lanes the eDP panel
+		 * advertizes being capable of. The panels are generally
+		 * designed to support only a single clock and lane
+		 * configuration, and typically these values correspond to the
+		 * native resolution of the panel.
+		 */
+		min_lane_count = max_lane_count;
+		min_clock = max_clock;
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
@@ -3732,7 +3724,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
 		intel_dp->use_tps3 = true;
-		DRM_DEBUG_KMS("Displayport TPS3 supported");
+		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
 		intel_dp->use_tps3 = false;
 
@@ -3808,21 +3800,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 	u8 buf[1];
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
 		return -ENOTTY;
 
 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
 			       DP_TEST_SINK_START) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	/* Wait 2 vblanks to be sure we will have the correct CRC value */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
 	return 0;
@@ -4395,7 +4387,7 @@ intel_dp_connector_destroy(struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	intel_dp_unset_edid(intel_attached_dp(connector));
+	kfree(intel_connector->detect_edid);
 
 	if (!IS_ERR_OR_NULL(intel_connector->edid))
 		kfree(intel_connector->edid);
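The eDP change above can simply start from the panel's maximum advertised lane count and link rate because whether a mode fits a given configuration is just a bandwidth budget check. A sketch of that comparison, modelled on the driver's intel_dp_link_required()/intel_dp_max_data_rate() helpers (units and example numbers are only illustrative):

	/* DP link budget check. Build: cc -std=c99 dp.c */
	#include <stdbool.h>
	#include <stdio.h>

	static int link_required(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp + 9) / 10;	/* round up */
	}

	static int max_data_rate(int link_clock_khz, int lanes)
	{
		/* 8b/10b channel coding: 8 payload bits per 10 raw bits. */
		return (link_clock_khz * lanes * 8) / 10;
	}

	int main(void)
	{
		/* 1920x1080@60 (~148.5 MHz pixel clock), 24 bpp, over two
		 * lanes at the HBR symbol rate (270 MHz): */
		bool ok = max_data_rate(270000, 2) >= link_required(148500, 24);
		printf("mode fits: %s\n", ok ? "yes" : "no");
		return 0;
	}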
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1501,7 +1501,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	intel_hdmi_unset_edid(connector);
+	kfree(to_intel_connector(connector)->detect_edid);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
@@ -300,8 +300,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
 	 */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if (dev_priv->uncore.fw_rendercount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_RENDER);
+		if (dev_priv->uncore.fw_mediacount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_MEDIA);
+	} else {
+		if (dev_priv->uncore.forcewake_count++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_ALL);
+	}
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
 	I915_WRITE(RING_ELSP(ring), desc[1]);
@@ -315,8 +325,19 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
 	/* Release Force Wakeup (see the big comment above). */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if (--dev_priv->uncore.fw_rendercount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_RENDER);
+		if (--dev_priv->uncore.fw_mediacount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_MEDIA);
+	} else {
+		if (--dev_priv->uncore.forcewake_count == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_ALL);
+	}
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
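The Cherryview fix above splits forcewake into separately refcounted render and media domains; the expensive hardware wake/sleep handshake happens only on a domain's 0 -> 1 and 1 -> 0 transitions. A minimal model of that reference counting (domain struct and printouts are invented for the demo):

	/* Refcounted per-domain forcewake. Build: cc -std=c99 fw.c */
	#include <stdio.h>

	struct fw_domain {
		const char *name;
		int count;
	};

	static void fw_get(struct fw_domain *d)
	{
		if (d->count++ == 0)
			printf("hw wake  %s\n", d->name);	/* force_wake_get */
	}

	static void fw_put(struct fw_domain *d)
	{
		if (--d->count == 0)
			printf("hw sleep %s\n", d->name);	/* force_wake_put */
	}

	int main(void)
	{
		struct fw_domain render = { "render", 0 }, media = { "media", 0 };

		/* Cherryview path: wake both domains around the ELSP write. */
		fw_get(&render);
		fw_get(&media);
		fw_get(&render);	/* nested get: no extra handshake */

		fw_put(&render);
		fw_put(&render);
		fw_put(&media);
		return 0;
	}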
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
@@ -1070,6 +1070,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 		wm_size = wm->max_wm;
 	if (wm_size <= 0)
 		wm_size = wm->default_wm;
+
+	/*
+	 * Bspec seems to indicate that the value shouldn't be lower than
+	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+	 * Lets go for 8 which is the burst size since certain platforms
+	 * already use a hardcoded 8 (which is what the spec says should be
+	 * done).
+	 */
+	if (wm_size <= 8)
+		wm_size = 8;
+
 	return wm_size;
 }
 
@@ -2274,6 +2285,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
 	else
 		return 2;
 }
 
-static void intel_print_wm_latency(struct drm_device *dev, const char *name,
+static void intel_print_wm_latency(struct drm_device *dev,
+				   const char *name,
 				   const uint16_t wm[5])
@@ -3242,9 +3254,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
 	int new_power;
 
-	if (dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
@@ -3452,11 +3461,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3470,11 +3476,8 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3488,17 +3491,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
 	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-			 dev_priv->rps.cur_freq,
-			 vlv_gpu_freq(dev_priv, val), val);
-
 	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
-	if (val != dev_priv->rps.cur_freq)
+	if (val != dev_priv->rps.cur_freq) {
+		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+				 dev_priv->rps.cur_freq,
+				 vlv_gpu_freq(dev_priv, val), val);
+
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+	}
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
@@ -3509,26 +3513,21 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 static void gen8_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
-		if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
-			del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv-> rps.is_bdw_sw_turbo = false;
-	} else {
-		I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-			   ~dev_priv->pm_rps_events);
-		/* Complete PM interrupt masking here doesn't race with the rps work
-		 * item again unmasking PM interrupts because that is using a different
-		 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-		 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-		 * gen8_enable_rps will clean up. */
 
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->rps.pm_iir = 0;
-		spin_unlock_irq(&dev_priv->irq_lock);
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+		   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
 
-		I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	}
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3686,111 +3685,13 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
 	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 }
 
-static void bdw_sw_calculate_freq(struct drm_device *dev,
-		struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 busy = 0;
-	u32 busyness_pct = 0;
-	u32 elapsed_time = 0;
-	u16 new_freq = 0;
-
-	if (!c || !cur_time || !c0)
-		return;
-
-	if (0 == c->last_c0)
-		goto out;
-
-	/* Check Evaluation interval */
-	elapsed_time = *cur_time - c->last_ts;
-	if (elapsed_time < c->eval_interval)
-		return;
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
-	 * Whole busyness_pct calculation should be
-	 *     busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
-	 *     busyness_pct = (u32)(busy * 100 / elapsed_time);
-	 * The final formula is to simplify CPU calculation
-	 */
-	busy = (u64)(*c0 - c->last_c0) << 12;
-	do_div(busy, elapsed_time);
-	busyness_pct = (u32)busy;
-
-	if (c->is_up && busyness_pct >= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq + 3;
-	if (!c->is_up && busyness_pct <= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq - 1;
-
-	/* Adjust to new frequency busyness and compare with threshold */
-	if (0 != new_freq) {
-		if (new_freq > dev_priv->rps.max_freq_softlimit)
-			new_freq = dev_priv->rps.max_freq_softlimit;
-		else if (new_freq < dev_priv->rps.min_freq_softlimit)
-			new_freq = dev_priv->rps.min_freq_softlimit;
-
-		gen6_set_rps(dev, new_freq);
-	}
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-out:
-	c->last_c0 = *c0;
-	c->last_ts = *cur_time;
-}
-
-static void gen8_set_frequency_RP0(struct work_struct *work)
-{
-	struct intel_rps_bdw_turbo *p_bdw_turbo =
-		container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
-	struct intel_gen6_power_mgmt *p_power_mgmt =
-		container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
-	struct drm_i915_private *dev_priv =
-		container_of(p_power_mgmt, struct drm_i915_private, rps);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void flip_active_timeout_handler(unsigned long var)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
-
-	del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-	atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
-
-	queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
-}
-
-void bdw_software_turbo(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
-	u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
-
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
-			&current_time, &current_c0);
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
-			&current_time, &current_c0);
-}
-
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0, rp_state_cap;
-	uint32_t threshold_up_pct, threshold_down_pct;
-	uint32_t ei_up, ei_down; /* up and down evaluation interval */
-	u32 rp_ctl_flag;
 	int unused;
 
-	/* Use software Turbo for BDW */
-	dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
-
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
 
@@ -3834,74 +3735,35 @@ static void gen8_enable_rps(struct drm_device *dev)
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
-	ei_up = 84480; /* 84.48ms */
-	ei_down = 448000;
-	threshold_up_pct = 90; /* x percent busy */
-	threshold_down_pct = 70;
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
-	if (dev_priv->rps.is_bdw_sw_turbo) {
-		dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
-		dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
-		dev_priv->rps.sw_turbo.up.is_up = true;
-		dev_priv->rps.sw_turbo.up.last_ts = 0;
-		dev_priv->rps.sw_turbo.up.last_c0 = 0;
+	/* Docs recommend 900MHz, and 300 MHz respectively */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
 
-		dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
-		dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
-		dev_priv->rps.sw_turbo.down.is_up = false;
-		dev_priv->rps.sw_turbo.down.last_ts = 0;
-		dev_priv->rps.sw_turbo.down.last_c0 = 0;
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
 
-		/* Start the timer to track if flip comes*/
-		dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
-
-		init_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
-		dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
-
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	} else {
-		/* NB: Docs say 1s, and 1000000 - which aren't equivalent
-		 * 1 second timeout*/
-		I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
-
-		/* Docs recommend 900MHz, and 300 MHz respectively */
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-			   dev_priv->rps.max_freq_softlimit << 24 |
-			   dev_priv->rps.min_freq_softlimit << 16);
-
-		I915_WRITE(GEN6_RP_UP_THRESHOLD,
-			   FREQ_1_28_US(ei_up * threshold_up_pct / 100));
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-			   FREQ_1_28_US(ei_down * threshold_down_pct / 100));
-		I915_WRITE(GEN6_RP_UP_EI,
-			   FREQ_1_28_US(ei_up));
-		I915_WRITE(GEN6_RP_DOWN_EI,
-			   FREQ_1_28_US(ei_down));
-
-		I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	}
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
 	/* 5: Enable RPS */
-	rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
-		      GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		      GEN6_RP_MEDIA_IS_GFX |
-		      GEN6_RP_UP_BUSY_AVG |
-		      GEN6_RP_DOWN_IDLE_AVG;
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		rp_ctl_flag |= GEN6_RP_ENABLE;
-
-	I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);
-
-	/* 6: Ring frequency + overclocking
-	 * (our driver does this later */
-	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		gen8_enable_rps_interrupts(dev);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later */
+	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+	gen8_enable_rps_interrupts(dev);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5375,8 +5237,6 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 			     rps.delayed_resume_work.work);
 	struct drm_device *dev = dev_priv->dev;
 
-	dev_priv->rps.is_bdw_sw_turbo = false;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_CHERRYVIEW(dev)) {
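The watermark clamp at the top of this file's changes is self-contained arithmetic: after the existing max/default adjustments, the result is never allowed below the FIFO burst size of 8 entries. A stand-alone version of that helper (the surrounding field names are invented for the demo):

	/* gen2/3 watermark clamp from intel_calculate_wm().
	 * Build: cc -std=c99 wm.c */
	#include <stdio.h>

	static long clamp_wm(long wm_size, long max_wm, long default_wm)
	{
		if (wm_size > max_wm)
			wm_size = max_wm;
		if (wm_size <= 0)
			wm_size = default_wm;
		/* Never report fewer entries than the burst size (8);
		 * 830 in particular misbehaves with smaller values. */
		if (wm_size <= 8)
			wm_size = 8;
		return wm_size;
	}

	int main(void)
	{
		printf("%ld %ld %ld\n",
		       clamp_wm(3, 64, 16),	/* clamped up to 8 */
		       clamp_wm(-2, 64, 16),	/* falls back to default 16 */
		       clamp_wm(40, 64, 16));	/* unchanged */
		return 0;
	}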
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -707,7 +707,7 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	 * update the number of dwords required based on the
 	 * actual number of workarounds applied
 	 */
-	ret = intel_ring_begin(ring, 24);
+	ret = intel_ring_begin(ring, 18);
 	if (ret)
 		return ret;
 
@@ -722,19 +722,8 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-	/*
-	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
-	 * pre-production hardware
-	 */
 	intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-			   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
-					      | GEN8_SAMPLER_POWER_BYPASS_DIS));
-
-	intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
-			   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
-	intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
-			   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+			   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
 	 * workaround for for a possible hang in the unlikely event a TLB
@@ -2203,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 		return ret;
 
 	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
-			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
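The HSW dispatch fix above moves MI_BATCH_PPGTT_HSW into the non-secure branch, so a secure batch is executed through the GGTT selector instead of the per-process address space. How that command dword gets assembled, as a sketch; the bit positions below are placeholders, not the real command-stream encoding:

	/* Conditional flag assembly for the batch-start dword.
	 * Build: cc -std=c99 batch.c */
	#include <stdint.h>
	#include <stdio.h>

	#define MI_BATCH_BUFFER_START	(0x31u << 23)	/* placeholder opcode */
	#define MI_BATCH_PPGTT_HSW	(1u << 8)	/* placeholder bit */
	#define MI_BATCH_NON_SECURE_HSW	(1u << 13)	/* placeholder bit */
	#define DISPATCH_SECURE		0x1

	static uint32_t batch_start_dword(unsigned flags)
	{
		/* Secure batches carry neither bit: GGTT, privileged.
		 * Non-secure batches get PPGTT + the non-secure marker. */
		return MI_BATCH_BUFFER_START |
		       (flags & DISPATCH_SECURE ?
			0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	}

	int main(void)
	{
		printf("secure:     %#x\n", batch_start_dword(DISPATCH_SECURE));
		printf("non-secure: %#x\n", batch_start_dword(0));
		return 0;
	}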