forked from Minki/linux
drm/i915: Remove rpm asserts that use i915
Quite a few of the call points have already switched to the version working directly on the runtime_pm structure, so let's switch over the rest and kill the i915-based asserts.

v2: rebase

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-3-daniele.ceraolospurio@intel.com
This commit is contained in:
parent
d5b6c275d0
commit
87b391b951
@ -308,7 +308,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
|
||||
goto err_fence;
|
||||
|
||||
/* Mark as being mmapped into userspace for later revocation */
|
||||
assert_rpm_wakelock_held(i915);
|
||||
assert_rpm_wakelock_held(&i915->runtime_pm);
|
||||
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
|
||||
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
|
||||
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
|
||||
|
@ -132,7 +132,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
|
||||
struct i915_fence_reg *reg;
|
||||
i915_reg_t fence_reg_lo, fence_reg_hi;
|
||||
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
|
||||
return;
|
||||
|
@ -360,7 +360,7 @@ int i915_vma_pin_fence(struct i915_vma *vma)
|
||||
* Note that we revoke fences on runtime suspend. Therefore the user
|
||||
* must keep the device awake whilst using the fence.
|
||||
*/
|
||||
assert_rpm_wakelock_held(vma->vm->i915);
|
||||
assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
|
||||
|
||||
/* Just update our place in the LRU if our fence is getting reused. */
|
||||
if (vma->fence) {
|
||||
|
@ -589,7 +589,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
|
||||
|
||||
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
|
||||
@ -598,7 +598,7 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
|
||||
|
||||
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (!dev_priv->guc.interrupts.enabled) {
|
||||
@ -612,7 +612,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
|
||||
|
||||
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
dev_priv->guc.interrupts.enabled = false;
|
||||
|
@ -364,7 +364,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
|
||||
int err;
|
||||
|
||||
/* Access through the GTT requires the device to be awake. */
|
||||
assert_rpm_wakelock_held(vma->vm->i915);
|
||||
assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
|
||||
|
||||
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
|
||||
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
|
||||
|
@ -273,7 +273,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
|
||||
fw_size = dev_priv->csr.dmc_fw_size;
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
preempt_disable();
|
||||
|
||||
|
@ -696,7 +696,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
|
||||
|
||||
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
|
||||
"DC5 already programmed to be enabled.\n");
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
assert_csr_loaded(dev_priv);
|
||||
}
|
||||
@ -1814,7 +1814,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
|
||||
* wakeref to make the state checker happy about the HW access during
|
||||
* power well disabling.
|
||||
*/
|
||||
assert_rpm_raw_wakeref_held(dev_priv);
|
||||
assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm);
|
||||
wakeref = intel_runtime_pm_get(dev_priv);
|
||||
|
||||
for_each_power_domain(domain, mask) {
|
||||
|
@ -1640,7 +1640,7 @@ assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
|
||||
}
|
||||
|
||||
static inline void
|
||||
____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
|
||||
__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
|
||||
{
|
||||
assert_rpm_device_not_suspended(rpm);
|
||||
WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
|
||||
@ -1648,35 +1648,23 @@ ____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
|
||||
}
|
||||
|
||||
static inline void
|
||||
____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
|
||||
__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
|
||||
{
|
||||
____assert_rpm_raw_wakeref_held(rpm, wakeref_count);
|
||||
__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
|
||||
WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
|
||||
"RPM wakelock ref not held during HW access\n");
|
||||
}
|
||||
|
||||
static inline void
|
||||
__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm)
|
||||
assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm)
|
||||
{
|
||||
____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
|
||||
__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
|
||||
}
|
||||
|
||||
static inline void
|
||||
assert_rpm_raw_wakeref_held(struct drm_i915_private *i915)
|
||||
assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
|
||||
{
|
||||
__assert_rpm_raw_wakeref_held(&i915->runtime_pm);
|
||||
}
|
||||
|
||||
static inline void
|
||||
__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
|
||||
{
|
||||
____assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
|
||||
}
|
||||
|
||||
static inline void
|
||||
assert_rpm_wakelock_held(struct drm_i915_private *i915)
|
||||
{
|
||||
__assert_rpm_wakelock_held(&i915->runtime_pm);
|
||||
__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -335,10 +335,10 @@ intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock)
|
||||
{
|
||||
if (wakelock) {
|
||||
atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
|
||||
__assert_rpm_wakelock_held(rpm);
|
||||
assert_rpm_wakelock_held(rpm);
|
||||
} else {
|
||||
atomic_inc(&rpm->wakeref_count);
|
||||
__assert_rpm_raw_wakeref_held(rpm);
|
||||
assert_rpm_raw_wakeref_held(rpm);
|
||||
}
|
||||
}
|
||||
|
||||
@ -346,10 +346,10 @@ static void
|
||||
intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock)
|
||||
{
|
||||
if (wakelock) {
|
||||
__assert_rpm_wakelock_held(rpm);
|
||||
assert_rpm_wakelock_held(rpm);
|
||||
atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
|
||||
} else {
|
||||
__assert_rpm_raw_wakeref_held(rpm);
|
||||
assert_rpm_raw_wakeref_held(rpm);
|
||||
}
|
||||
|
||||
__intel_wakeref_dec_and_check_tracking(rpm);
|
||||
@ -465,7 +465,7 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
|
||||
{
|
||||
struct i915_runtime_pm *rpm = &i915->runtime_pm;
|
||||
|
||||
__assert_rpm_wakelock_held(rpm);
|
||||
assert_rpm_wakelock_held(rpm);
|
||||
pm_runtime_get_noresume(rpm->kdev);
|
||||
|
||||
intel_runtime_pm_acquire(rpm, true);
|
||||
|
@ -583,7 +583,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
|
||||
if (!uncore->funcs.force_wake_get)
|
||||
return;
|
||||
|
||||
__assert_rpm_wakelock_held(uncore->rpm);
|
||||
assert_rpm_wakelock_held(uncore->rpm);
|
||||
|
||||
spin_lock_irqsave(&uncore->lock, irqflags);
|
||||
__intel_uncore_forcewake_get(uncore, fw_domains);
|
||||
@ -737,7 +737,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
|
||||
if (!uncore->funcs.force_wake_get)
|
||||
return;
|
||||
|
||||
__assert_rpm_wakelock_held(uncore->rpm);
|
||||
assert_rpm_wakelock_held(uncore->rpm);
|
||||
|
||||
fw_domains &= uncore->fw_domains;
|
||||
WARN(fw_domains & ~uncore->fw_domains_active,
|
||||
@ -1054,7 +1054,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
|
||||
|
||||
#define GEN2_READ_HEADER(x) \
|
||||
u##x val = 0; \
|
||||
__assert_rpm_wakelock_held(uncore->rpm);
|
||||
assert_rpm_wakelock_held(uncore->rpm);
|
||||
|
||||
#define GEN2_READ_FOOTER \
|
||||
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
|
||||
@ -1096,7 +1096,7 @@ __gen2_read(64)
|
||||
u32 offset = i915_mmio_reg_offset(reg); \
|
||||
unsigned long irqflags; \
|
||||
u##x val = 0; \
|
||||
__assert_rpm_wakelock_held(uncore->rpm); \
|
||||
assert_rpm_wakelock_held(uncore->rpm); \
|
||||
spin_lock_irqsave(&uncore->lock, irqflags); \
|
||||
unclaimed_reg_debug(uncore, reg, true, true)
|
||||
|
||||
@ -1170,7 +1170,7 @@ __gen6_read(64)
|
||||
|
||||
#define GEN2_WRITE_HEADER \
|
||||
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
|
||||
__assert_rpm_wakelock_held(uncore->rpm); \
|
||||
assert_rpm_wakelock_held(uncore->rpm); \
|
||||
|
||||
#define GEN2_WRITE_FOOTER
|
||||
|
||||
@ -1208,7 +1208,7 @@ __gen2_write(32)
|
||||
u32 offset = i915_mmio_reg_offset(reg); \
|
||||
unsigned long irqflags; \
|
||||
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
|
||||
__assert_rpm_wakelock_held(uncore->rpm); \
|
||||
assert_rpm_wakelock_held(uncore->rpm); \
|
||||
spin_lock_irqsave(&uncore->lock, irqflags); \
|
||||
unclaimed_reg_debug(uncore, reg, false, true)
|
||||
|
||||
|
@ -110,7 +110,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
|
||||
}
|
||||
|
||||
/* Our mission is that we only extend an already active wakeref */
|
||||
assert_rpm_wakelock_held(wf->i915);
|
||||
assert_rpm_wakelock_held(&wf->i915->runtime_pm);
|
||||
|
||||
if (!refcount_inc_not_zero(&wf->count)) {
|
||||
spin_lock_irqsave(&wf->lock, flags);
|
||||
|
Loading…
Reference in New Issue
Block a user