diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a4234a837032..95a3cc367d50 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -653,6 +653,7 @@ struct i915_suspend_saved_registers {
 
 struct intel_gen6_power_mgmt {
 	struct work_struct work;
+	struct delayed_work vlv_work; /* VLV: deferred drop to RPe when idle */
 	u32 pm_iir;
 	/* lock - irqsave spinlock that protectects the work_struct and
 	 * pm_iir. */
@@ -663,6 +664,7 @@ struct intel_gen6_power_mgmt {
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 rpe_delay; /* VLV: RPe, the most power-efficient frequency */
 	u8 hw_max;
 
 	struct delayed_work delayed_resume_work;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7c81f0fb721a..3cf646cdab1a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
 			gen6_set_rps(dev_priv->dev, new_delay);
 	}
 
+	if (IS_VALLEYVIEW(dev_priv->dev)) {
+		/*
+		 * On VLV, when we enter RC6 we may not be at the minimum
+		 * voltage level, so arm a timer to check.  Every RPS
+		 * interrupt re-arms it, so it only fires once activity has
+		 * stopped and we have most likely dropped into RC6.
+		 */
+		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
+				 msecs_to_jiffies(100));
+	}
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2557926553b3..93b01e197f42 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2822,6 +2822,23 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 	return val & 0xff;
 }
 
+static void vlv_rps_timer_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, rps.vlv_work.work);
+
+	/*
+	 * Timer fired, so we must be idle; drop to the minimum voltage
+	 * state.  Note: we request RPe here since it should match the
+	 * Vmin we were aiming for, which should give better performance
+	 * when we come back out of RC6 than the lowest available
+	 * frequency would.
+	 */
+	mutex_lock(&dev_priv->rps.hw_lock);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2886,6 +2903,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	rpe = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d\n",
 			 vlv_gpu_freq(dev_priv->mem_freq, rpe));
+	dev_priv->rps.rpe_delay = rpe;
 
 	val = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq,
@@ -2895,6 +2913,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("setting GPU freq to %d\n",
 			 vlv_gpu_freq(dev_priv->mem_freq, rpe));
 
+	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
+
 	valleyview_set_rps(dev_priv->dev, rpe);
 
 	/* requires MSI enabled */
@@ -3637,6 +3657,8 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 		ironlake_disable_rc6(dev);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+		if (IS_VALLEYVIEW(dev))
+			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		gen6_disable_rps(dev);
 		mutex_unlock(&dev_priv->rps.hw_lock);
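
For reference, the idle-detection scheme the hunks above implement (re-arm a delayed work from every RPS interrupt, let it fire once things go quiet, then drop to RPe) is the standard mod_delayed_work() deferral pattern. The sketch below is not part of the patch; the demo_* names and the 100 ms period are made up for illustration, and it only shows the pattern in isolation as an out-of-tree module.

/*
 * Illustrative sketch only, not part of the patch.  Shows the
 * mod_delayed_work() re-arm pattern used by gen6_pm_rps_work() and
 * vlv_rps_timer_work() above; all demo_* names are hypothetical.
 */
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work demo_idle_work;

/* Runs only once no "activity" has re-armed the work for 100 ms. */
static void demo_idle_fn(struct work_struct *work)
{
	pr_info("demo: idle for 100 ms, dropping to the low-power state\n");
}

/* Call on every "activity" event (the RPS interrupt in the patch). */
static void demo_note_activity(void)
{
	/* Push the expiry out again; the work fires only after a quiet gap. */
	mod_delayed_work(system_wq, &demo_idle_work, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	int i;

	INIT_DELAYED_WORK(&demo_idle_work, demo_idle_fn);

	/* Bursts of activity 50 ms apart keep deferring demo_idle_fn()... */
	for (i = 0; i < 5; i++) {
		demo_note_activity();
		msleep(50);
	}
	/* ...and it finally runs about 100 ms after the last call above. */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

mod_delayed_work() is used rather than queue_delayed_work() because it moves an already-pending expiry instead of leaving the earlier one in place, which is what lets each new event push the drop-to-RPe deadline back.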